Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c          |   2
-rw-r--r--  block/blk-core.c            |  33
-rw-r--r--  block/blk-ioc.c             | 228
-rw-r--r--  block/blk-merge.c           |  37
-rw-r--r--  block/blk-softirq.c         |  16
-rw-r--r--  block/blk.h                 |  18
-rw-r--r--  block/bsg.c                 |   3
-rw-r--r--  block/cfq-iosched.c         |  36
-rw-r--r--  block/elevator.c            |  55
-rw-r--r--  block/genhd.c               |  42
-rw-r--r--  block/partition-generic.c   |  48
-rw-r--r--  block/partitions/ldm.c      |  11
12 files changed, 258 insertions(+), 271 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 1359d637831f..ea84a23d5e68 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1653,7 +1653,7 @@ static void blkiocg_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
 		if (ioc) {
 			ioc_cgroup_changed(ioc);
-			put_io_context(ioc, NULL);
+			put_io_context(ioc);
 		}
 	}
 }
diff --git a/block/blk-core.c b/block/blk-core.c
index e6c05a97ee2b..3a78b00edd71 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -642,7 +642,7 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
 	if (rq->cmd_flags & REQ_ELVPRIV) {
 		elv_put_request(q, rq);
 		if (rq->elv.icq)
-			put_io_context(rq->elv.icq->ioc, q);
+			put_io_context(rq->elv.icq->ioc);
 	}
 
 	mempool_free(rq, q->rq.rq_pool);
@@ -872,13 +872,15 @@ retry:
 	spin_unlock_irq(q->queue_lock);
 
 	/* create icq if missing */
-	if (unlikely(et->icq_cache && !icq))
+	if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
 		icq = ioc_create_icq(q, gfp_mask);
+		if (!icq)
+			goto fail_icq;
+	}
 
-	/* rqs are guaranteed to have icq on elv_set_request() if requested */
-	if (likely(!et->icq_cache || icq))
-		rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
+	rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
 
+fail_icq:
 	if (unlikely(!rq)) {
 		/*
 		 * Allocation failed presumably due to memory. Undo anything
@@ -1210,7 +1212,6 @@ static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
 	drive_stat_acct(req, 0);
-	elv_bio_merged(q, req, bio);
 	return true;
 }
 
@@ -1241,7 +1242,6 @@ static bool bio_attempt_front_merge(struct request_queue *q,
 	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
 	drive_stat_acct(req, 0);
-	elv_bio_merged(q, req, bio);
 	return true;
 }
 
@@ -1255,13 +1255,12 @@ static bool bio_attempt_front_merge(struct request_queue *q,
  * on %current's plugged list. Returns %true if merge was successful,
  * otherwise %false.
  *
- * This function is called without @q->queue_lock; however, elevator is
- * accessed iff there already are requests on the plugged list which in
- * turn guarantees validity of the elevator.
- *
- * Note that, on successful merge, elevator operation
- * elevator_bio_merged_fn() will be called without queue lock. Elevator
- * must be ready for this.
+ * Plugging coalesces IOs from the same issuer for the same purpose without
+ * going through @q->queue_lock. As such it's more of an issuing mechanism
+ * than scheduling, and the request, while may have elvpriv data, is not
+ * added on the elevator at this point. In addition, we don't have
+ * reliable access to the elevator outside queue lock. Only check basic
+ * merging parameters without querying the elevator.
  */
 static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
 			       unsigned int *request_count)
@@ -1280,10 +1279,10 @@ static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
 
 		(*request_count)++;
 
-		if (rq->q != q)
+		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
 			continue;
 
-		el_ret = elv_try_merge(rq, bio);
+		el_ret = blk_try_merge(rq, bio);
 		if (el_ret == ELEVATOR_BACK_MERGE) {
 			ret = bio_attempt_back_merge(q, rq, bio);
 			if (ret)
@@ -1345,12 +1344,14 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
 	el_ret = elv_merge(q, &req, bio);
 	if (el_ret == ELEVATOR_BACK_MERGE) {
 		if (bio_attempt_back_merge(q, req, bio)) {
+			elv_bio_merged(q, req, bio);
 			if (!attempt_back_merge(q, req))
 				elv_merged_request(q, req, el_ret);
 			goto out_unlock;
 		}
 	} else if (el_ret == ELEVATOR_FRONT_MERGE) {
 		if (bio_attempt_front_merge(q, req, bio)) {
+			elv_bio_merged(q, req, bio);
 			if (!attempt_front_merge(q, req))
 				elv_merged_request(q, req, el_ret);
 			goto out_unlock;
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 27a06e00eaec..fb95dd2f889a 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -29,21 +29,6 @@ void get_io_context(struct io_context *ioc)
 }
 EXPORT_SYMBOL(get_io_context);
 
-/*
- * Releasing ioc may nest into another put_io_context() leading to nested
- * fast path release. As the ioc's can't be the same, this is okay but
- * makes lockdep whine. Keep track of nesting and use it as subclass.
- */
-#ifdef CONFIG_LOCKDEP
-#define ioc_release_depth(q)		((q) ? (q)->ioc_release_depth : 0)
-#define ioc_release_depth_inc(q)	(q)->ioc_release_depth++
-#define ioc_release_depth_dec(q)	(q)->ioc_release_depth--
-#else
-#define ioc_release_depth(q)		0
-#define ioc_release_depth_inc(q)	do { } while (0)
-#define ioc_release_depth_dec(q)	do { } while (0)
-#endif
-
 static void icq_free_icq_rcu(struct rcu_head *head)
 {
 	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
@@ -51,11 +36,23 @@ static void icq_free_icq_rcu(struct rcu_head *head)
 	kmem_cache_free(icq->__rcu_icq_cache, icq);
 }
 
-/*
- * Exit and free an icq. Called with both ioc and q locked.
- */
+/* Exit an icq. Called with both ioc and q locked. */
 static void ioc_exit_icq(struct io_cq *icq)
 {
+	struct elevator_type *et = icq->q->elevator->type;
+
+	if (icq->flags & ICQ_EXITED)
+		return;
+
+	if (et->ops.elevator_exit_icq_fn)
+		et->ops.elevator_exit_icq_fn(icq);
+
+	icq->flags |= ICQ_EXITED;
+}
+
+/* Release an icq. Called with both ioc and q locked. */
+static void ioc_destroy_icq(struct io_cq *icq)
+{
 	struct io_context *ioc = icq->ioc;
 	struct request_queue *q = icq->q;
 	struct elevator_type *et = q->elevator->type;
@@ -75,11 +72,7 @@ static void ioc_exit_icq(struct io_cq *icq)
 	if (rcu_dereference_raw(ioc->icq_hint) == icq)
 		rcu_assign_pointer(ioc->icq_hint, NULL);
 
-	if (et->ops.elevator_exit_icq_fn) {
-		ioc_release_depth_inc(q);
-		et->ops.elevator_exit_icq_fn(icq);
-		ioc_release_depth_dec(q);
-	}
+	ioc_exit_icq(icq);
 
 	/*
 	 * @icq->q might have gone away by the time RCU callback runs
@@ -97,51 +90,32 @@ static void ioc_release_fn(struct work_struct *work)
 {
 	struct io_context *ioc = container_of(work, struct io_context,
 					      release_work);
-	struct request_queue *last_q = NULL;
+	unsigned long flags;
 
-	spin_lock_irq(&ioc->lock);
+	/*
+	 * Exiting icq may call into put_io_context() through elevator
+	 * which will trigger lockdep warning. The ioc's are guaranteed to
+	 * be different, use a different locking subclass here. Use
+	 * irqsave variant as there's no spin_lock_irq_nested().
+	 */
+	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
 
 	while (!hlist_empty(&ioc->icq_list)) {
 		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
 						struct io_cq, ioc_node);
-		struct request_queue *this_q = icq->q;
+		struct request_queue *q = icq->q;
 
-		if (this_q != last_q) {
-			/*
-			 * Need to switch to @this_q. Once we release
-			 * @ioc->lock, it can go away along with @cic.
-			 * Hold on to it.
-			 */
-			__blk_get_queue(this_q);
-
-			/*
-			 * blk_put_queue() might sleep thanks to kobject
-			 * idiocy. Always release both locks, put and
-			 * restart.
-			 */
-			if (last_q) {
-				spin_unlock(last_q->queue_lock);
-				spin_unlock_irq(&ioc->lock);
-				blk_put_queue(last_q);
-			} else {
-				spin_unlock_irq(&ioc->lock);
-			}
-
-			last_q = this_q;
-			spin_lock_irq(this_q->queue_lock);
-			spin_lock(&ioc->lock);
-			continue;
+		if (spin_trylock(q->queue_lock)) {
+			ioc_destroy_icq(icq);
+			spin_unlock(q->queue_lock);
+		} else {
+			spin_unlock_irqrestore(&ioc->lock, flags);
+			cpu_relax();
+			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
 		}
-		ioc_exit_icq(icq);
 	}
 
-	if (last_q) {
-		spin_unlock(last_q->queue_lock);
-		spin_unlock_irq(&ioc->lock);
-		blk_put_queue(last_q);
-	} else {
-		spin_unlock_irq(&ioc->lock);
-	}
+	spin_unlock_irqrestore(&ioc->lock, flags);
 
 	kmem_cache_free(iocontext_cachep, ioc);
 }
@@ -149,79 +123,35 @@ static void ioc_release_fn(struct work_struct *work)
 /**
  * put_io_context - put a reference of io_context
  * @ioc: io_context to put
- * @locked_q: request_queue the caller is holding queue_lock of (hint)
  *
  * Decrement reference count of @ioc and release it if the count reaches
- * zero. If the caller is holding queue_lock of a queue, it can indicate
- * that with @locked_q. This is an optimization hint and the caller is
- * allowed to pass in %NULL even when it's holding a queue_lock.
+ * zero.
  */
-void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
+void put_io_context(struct io_context *ioc)
 {
-	struct request_queue *last_q = locked_q;
 	unsigned long flags;
+	bool free_ioc = false;
 
 	if (ioc == NULL)
 		return;
 
 	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
-	if (locked_q)
-		lockdep_assert_held(locked_q->queue_lock);
-
-	if (!atomic_long_dec_and_test(&ioc->refcount))
-		return;
 
 	/*
-	 * Destroy @ioc. This is a bit messy because icq's are chained
-	 * from both ioc and queue, and ioc->lock nests inside queue_lock.
-	 * The inner ioc->lock should be held to walk our icq_list and then
-	 * for each icq the outer matching queue_lock should be grabbed.
-	 * ie. We need to do reverse-order double lock dancing.
-	 *
-	 * Another twist is that we are often called with one of the
-	 * matching queue_locks held as indicated by @locked_q, which
-	 * prevents performing double-lock dance for other queues.
-	 *
-	 * So, we do it in two stages. The fast path uses the queue_lock
-	 * the caller is holding and, if other queues need to be accessed,
-	 * uses trylock to avoid introducing locking dependency. This can
-	 * handle most cases, especially if @ioc was performing IO on only
-	 * single device.
-	 *
-	 * If trylock doesn't cut it, we defer to @ioc->release_work which
-	 * can do all the double-locking dancing.
+	 * Releasing ioc requires reverse order double locking and we may
+	 * already be holding a queue_lock. Do it asynchronously from wq.
 	 */
-	spin_lock_irqsave_nested(&ioc->lock, flags,
-				 ioc_release_depth(locked_q));
-
-	while (!hlist_empty(&ioc->icq_list)) {
-		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
-						struct io_cq, ioc_node);
-		struct request_queue *this_q = icq->q;
-
-		if (this_q != last_q) {
-			if (last_q && last_q != locked_q)
-				spin_unlock(last_q->queue_lock);
-			last_q = NULL;
-
-			if (!spin_trylock(this_q->queue_lock))
-				break;
-			last_q = this_q;
-			continue;
-		}
-		ioc_exit_icq(icq);
+	if (atomic_long_dec_and_test(&ioc->refcount)) {
+		spin_lock_irqsave(&ioc->lock, flags);
+		if (!hlist_empty(&ioc->icq_list))
+			schedule_work(&ioc->release_work);
+		else
+			free_ioc = true;
+		spin_unlock_irqrestore(&ioc->lock, flags);
 	}
 
-	if (last_q && last_q != locked_q)
-		spin_unlock(last_q->queue_lock);
-
-	spin_unlock_irqrestore(&ioc->lock, flags);
-
-	/* if no icq is left, we're done; otherwise, kick release_work */
-	if (hlist_empty(&ioc->icq_list))
+	if (free_ioc)
 		kmem_cache_free(iocontext_cachep, ioc);
-	else
-		schedule_work(&ioc->release_work);
 }
 EXPORT_SYMBOL(put_io_context);
 
@@ -229,14 +159,42 @@ EXPORT_SYMBOL(put_io_context);
 void exit_io_context(struct task_struct *task)
 {
 	struct io_context *ioc;
+	struct io_cq *icq;
+	struct hlist_node *n;
+	unsigned long flags;
 
 	task_lock(task);
 	ioc = task->io_context;
 	task->io_context = NULL;
 	task_unlock(task);
 
-	atomic_dec(&ioc->nr_tasks);
-	put_io_context(ioc, NULL);
+	if (!atomic_dec_and_test(&ioc->nr_tasks)) {
+		put_io_context(ioc);
+		return;
+	}
+
+	/*
+	 * Need ioc lock to walk icq_list and q lock to exit icq. Perform
+	 * reverse double locking. Read comment in ioc_release_fn() for
+	 * explanation on the nested locking annotation.
+	 */
+retry:
+	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
+	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
+		if (icq->flags & ICQ_EXITED)
+			continue;
+		if (spin_trylock(icq->q->queue_lock)) {
+			ioc_exit_icq(icq);
+			spin_unlock(icq->q->queue_lock);
+		} else {
+			spin_unlock_irqrestore(&ioc->lock, flags);
+			cpu_relax();
+			goto retry;
+		}
+	}
+	spin_unlock_irqrestore(&ioc->lock, flags);
+
+	put_io_context(ioc);
 }
 
 /**
@@ -255,7 +213,7 @@ void ioc_clear_queue(struct request_queue *q)
 		struct io_context *ioc = icq->ioc;
 
 		spin_lock(&ioc->lock);
-		ioc_exit_icq(icq);
+		ioc_destroy_icq(icq);
 		spin_unlock(&ioc->lock);
 	}
 }
@@ -424,13 +382,13 @@ struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
 	return icq;
 }
 
-void ioc_set_changed(struct io_context *ioc, int which)
+void ioc_set_icq_flags(struct io_context *ioc, unsigned int flags)
 {
 	struct io_cq *icq;
 	struct hlist_node *n;
 
 	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
-		set_bit(which, &icq->changed);
+		icq->flags |= flags;
 }
 
 /**
@@ -448,7 +406,7 @@ void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
 
 	spin_lock_irqsave(&ioc->lock, flags);
 	ioc->ioprio = ioprio;
-	ioc_set_changed(ioc, ICQ_IOPRIO_CHANGED);
+	ioc_set_icq_flags(ioc, ICQ_IOPRIO_CHANGED);
 	spin_unlock_irqrestore(&ioc->lock, flags);
 }
 
@@ -465,11 +423,33 @@ void ioc_cgroup_changed(struct io_context *ioc)
 	unsigned long flags;
 
 	spin_lock_irqsave(&ioc->lock, flags);
-	ioc_set_changed(ioc, ICQ_CGROUP_CHANGED);
+	ioc_set_icq_flags(ioc, ICQ_CGROUP_CHANGED);
 	spin_unlock_irqrestore(&ioc->lock, flags);
 }
 EXPORT_SYMBOL(ioc_cgroup_changed);
 
+/**
+ * icq_get_changed - fetch and clear icq changed mask
+ * @icq: icq of interest
+ *
+ * Fetch and clear ICQ_*_CHANGED bits from @icq. Grabs and releases
+ * @icq->ioc->lock.
+ */
+unsigned icq_get_changed(struct io_cq *icq)
+{
+	unsigned int changed = 0;
+	unsigned long flags;
+
+	if (unlikely(icq->flags & ICQ_CHANGED_MASK)) {
+		spin_lock_irqsave(&icq->ioc->lock, flags);
+		changed = icq->flags & ICQ_CHANGED_MASK;
+		icq->flags &= ~ICQ_CHANGED_MASK;
+		spin_unlock_irqrestore(&icq->ioc->lock, flags);
+	}
+	return changed;
+}
+EXPORT_SYMBOL(icq_get_changed);
+
 static int __init blk_ioc_init(void)
 {
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
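The reworked put_io_context() above drops the request_queue hint argument: the final reference drop no longer nests queue_lock inside ioc->lock, it punts any remaining icq teardown to ioc->release_work. A minimal kernel-style sketch of the new calling convention, assuming the caller owns a reference of its own (the wrapper name below is hypothetical, shown only for illustration):

/* sketch: dropping an io_context reference after this series */
static void example_drop_ioc(struct io_context *ioc)
{
	/*
	 * Old API: put_io_context(ioc, locked_q), where @locked_q was an
	 * optional hint that the caller already held that queue_lock.
	 * New API: no queue argument; if icqs remain on the final put,
	 * release is deferred to ioc->release_work instead of
	 * double-locking here.
	 */
	put_io_context(ioc);
}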
diff --git a/block/blk-merge.c b/block/blk-merge.c
index cfcc37cb222b..160035f54882 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -471,3 +471,40 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 {
 	return attempt_merge(q, rq, next);
 }
+
+bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
+{
+	if (!rq_mergeable(rq))
+		return false;
+
+	/* don't merge file system requests and discard requests */
+	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
+		return false;
+
+	/* don't merge discard requests and secure discard requests */
+	if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
+		return false;
+
+	/* different data direction or already started, don't merge */
+	if (bio_data_dir(bio) != rq_data_dir(rq))
+		return false;
+
+	/* must be same device and not a special request */
+	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
+		return false;
+
+	/* only merge integrity protected bio into ditto rq */
+	if (bio_integrity(bio) != blk_integrity_rq(rq))
+		return false;
+
+	return true;
+}
+
+int blk_try_merge(struct request *rq, struct bio *bio)
+{
+	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
+		return ELEVATOR_BACK_MERGE;
+	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
+		return ELEVATOR_FRONT_MERGE;
+	return ELEVATOR_NO_MERGE;
+}
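The blk_rq_merge_ok()/blk_try_merge() pair added above splits the old elv_rq_merge_ok()/elv_try_merge() logic so that the plug-merge path can test mergeability without consulting the elevator, which is only safely reachable under queue_lock. A minimal sketch of how a caller combines the two helpers, given a candidate request and bio (the function name is hypothetical):

/* sketch: elevator-free merge classification, as used by attempt_plug_merge() */
static int example_plug_merge_type(struct request *rq, struct bio *bio)
{
	/* generic constraints: direction, device, discard/secure flags, integrity */
	if (!blk_rq_merge_ok(rq, bio))
		return ELEVATOR_NO_MERGE;

	/* purely positional decision: back merge, front merge or none */
	return blk_try_merge(rq, bio);
}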
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 1366a89d8e66..467c8de88642 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -8,6 +8,7 @@
 #include <linux/blkdev.h>
 #include <linux/interrupt.h>
 #include <linux/cpu.h>
+#include <linux/sched.h>
 
 #include "blk.h"
 
@@ -103,9 +104,10 @@ static struct notifier_block __cpuinitdata blk_cpu_notifier = {
 
 void __blk_complete_request(struct request *req)
 {
-	int ccpu, cpu, group_cpu = NR_CPUS;
+	int ccpu, cpu;
 	struct request_queue *q = req->q;
 	unsigned long flags;
+	bool shared = false;
 
 	BUG_ON(!q->softirq_done_fn);
 
@@ -117,22 +119,20 @@ void __blk_complete_request(struct request *req)
 	 */
 	if (req->cpu != -1) {
 		ccpu = req->cpu;
-		if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) {
-			ccpu = blk_cpu_to_group(ccpu);
-			group_cpu = blk_cpu_to_group(cpu);
-		}
+		if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
+			shared = cpus_share_cache(cpu, ccpu);
 	} else
 		ccpu = cpu;
 
 	/*
-	 * If current CPU and requested CPU are in the same group, running
-	 * softirq in current CPU. One might concern this is just like
+	 * If current CPU and requested CPU share a cache, run the softirq on
+	 * the current CPU. One might concern this is just like
 	 * QUEUE_FLAG_SAME_FORCE, but actually not. blk_complete_request() is
 	 * running in interrupt handler, and currently I/O controller doesn't
 	 * support multiple interrupts, so current CPU is unique actually. This
 	 * avoids IPI sending from current CPU to the first CPU of a group.
 	 */
-	if (ccpu == cpu || ccpu == group_cpu) {
+	if (ccpu == cpu || shared) {
 		struct list_head *list;
 do_local:
 		list = &__get_cpu_var(blk_cpu_done);
diff --git a/block/blk.h b/block/blk.h
index 7efd772336de..d45be871329e 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -137,6 +137,8 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 			  struct request *next);
 void blk_recalc_rq_segments(struct request *rq);
 void blk_rq_set_mixed_merge(struct request *rq);
+bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
+int blk_try_merge(struct request *rq, struct bio *bio);
 
 void blk_queue_congestion_threshold(struct request_queue *q);
 
@@ -164,22 +166,6 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
 	return q->nr_congestion_off;
 }
 
-static inline int blk_cpu_to_group(int cpu)
-{
-	int group = NR_CPUS;
-#ifdef CONFIG_SCHED_MC
-	const struct cpumask *mask = cpu_coregroup_mask(cpu);
-	group = cpumask_first(mask);
-#elif defined(CONFIG_SCHED_SMT)
-	group = cpumask_first(topology_thread_cpumask(cpu));
-#else
-	return cpu;
-#endif
-	if (likely(group < NR_CPUS))
-		return group;
-	return cpu;
-}
-
 /*
  * Contribute to IO statistics IFF:
  *
diff --git a/block/bsg.c b/block/bsg.c
index 4cf703fd98bb..ff64ae3bacee 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -983,7 +983,8 @@ void bsg_unregister_queue(struct request_queue *q)
 
 	mutex_lock(&bsg_mutex);
 	idr_remove(&bsg_minor_idr, bcd->minor);
-	sysfs_remove_link(&q->kobj, "bsg");
+	if (q->kobj.sd)
+		sysfs_remove_link(&q->kobj, "bsg");
 	device_unregister(bcd->class_dev);
 	bcd->class_dev = NULL;
 	kref_put(&bcd->ref, bsg_kref_release_function);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ee55019066a1..457295253566 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1699,18 +1699,11 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 
 	/*
 	 * Lookup the cfqq that this bio will be queued with and allow
-	 * merge only if rq is queued there. This function can be called
-	 * from plug merge without queue_lock. In such cases, ioc of @rq
-	 * and %current are guaranteed to be equal. Avoid lookup which
-	 * requires queue_lock by using @rq's cic.
+	 * merge only if rq is queued there.
 	 */
-	if (current->io_context == RQ_CIC(rq)->icq.ioc) {
-		cic = RQ_CIC(rq);
-	} else {
-		cic = cfq_cic_lookup(cfqd, current->io_context);
-		if (!cic)
-			return false;
-	}
+	cic = cfq_cic_lookup(cfqd, current->io_context);
+	if (!cic)
+		return false;
 
 	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
 	return cfqq == RQ_CFQQ(rq);
@@ -1794,7 +1787,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	cfqd->active_queue = NULL;
 
 	if (cfqd->active_cic) {
-		put_io_context(cfqd->active_cic->icq.ioc, cfqd->queue);
+		put_io_context(cfqd->active_cic->icq.ioc);
 		cfqd->active_cic = NULL;
 	}
 }
@@ -3117,17 +3110,18 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
  */
 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
+	enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
+
 	cfq_log_cfqq(cfqd, cfqq, "preempt");
+	cfq_slice_expired(cfqd, 1);
 
 	/*
 	 * workload type is changed, don't save slice, otherwise preempt
 	 * doesn't happen
 	 */
-	if (cfqq_type(cfqd->active_queue) != cfqq_type(cfqq))
+	if (old_type != cfqq_type(cfqq))
 		cfqq->cfqg->saved_workload_slice = 0;
 
-	cfq_slice_expired(cfqd, 1);
-
 	/*
 	 * Put the new queue at the front of the of the current list,
 	 * so we know that it will be selected next.
@@ -3476,20 +3470,20 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 	const int rw = rq_data_dir(rq);
 	const bool is_sync = rq_is_sync(rq);
 	struct cfq_queue *cfqq;
+	unsigned int changed;
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
 	spin_lock_irq(q->queue_lock);
 
 	/* handle changed notifications */
-	if (unlikely(cic->icq.changed)) {
-		if (test_and_clear_bit(ICQ_IOPRIO_CHANGED, &cic->icq.changed))
+	changed = icq_get_changed(&cic->icq);
+	if (unlikely(changed & ICQ_IOPRIO_CHANGED))
 		changed_ioprio(cic);
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-		if (test_and_clear_bit(ICQ_CGROUP_CHANGED, &cic->icq.changed))
+	if (unlikely(changed & ICQ_CGROUP_CHANGED))
 		changed_cgroup(cic);
 #endif
-	}
 
 new_queue:
 	cfqq = cic_to_cfqq(cic, is_sync);
diff --git a/block/elevator.c b/block/elevator.c
index 91e18f8af9be..f016855a46b0 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -70,39 +70,9 @@ static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
 /*
  * can we safely merge with this request?
  */
-int elv_rq_merge_ok(struct request *rq, struct bio *bio)
+bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
 {
-	if (!rq_mergeable(rq))
-		return 0;
-
-	/*
-	 * Don't merge file system requests and discard requests
-	 */
-	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
-		return 0;
-
-	/*
-	 * Don't merge discard requests and secure discard requests
-	 */
-	if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
-		return 0;
-
-	/*
-	 * different data direction or already started, don't merge
-	 */
-	if (bio_data_dir(bio) != rq_data_dir(rq))
-		return 0;
-
-	/*
-	 * must be same device and not a special request
-	 */
-	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
-		return 0;
-
-	/*
-	 * only merge integrity protected bio into ditto rq
-	 */
-	if (bio_integrity(bio) != blk_integrity_rq(rq))
+	if (!blk_rq_merge_ok(rq, bio))
 		return 0;
 
 	if (!elv_iosched_allow_merge(rq, bio))
@@ -112,23 +82,6 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 }
 EXPORT_SYMBOL(elv_rq_merge_ok);
 
-int elv_try_merge(struct request *__rq, struct bio *bio)
-{
-	int ret = ELEVATOR_NO_MERGE;
-
-	/*
-	 * we can merge and sequence is ok, check if it's possible
-	 */
-	if (elv_rq_merge_ok(__rq, bio)) {
-		if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
-			ret = ELEVATOR_BACK_MERGE;
-		else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
-			ret = ELEVATOR_FRONT_MERGE;
-	}
-
-	return ret;
-}
-
 static struct elevator_type *elevator_find(const char *name)
 {
 	struct elevator_type *e;
@@ -478,8 +431,8 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 	/*
 	 * First try one-hit cache.
 	 */
-	if (q->last_merge) {
-		ret = elv_try_merge(q->last_merge, bio);
+	if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
+		ret = blk_try_merge(q->last_merge, bio);
 		if (ret != ELEVATOR_NO_MERGE) {
 			*req = q->last_merge;
 			return ret;
diff --git a/block/genhd.c b/block/genhd.c
index 23b4f7063322..df9816ede75b 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -35,6 +35,7 @@ static DEFINE_IDR(ext_devt_idr);
 
 static struct device_type disk_type;
 
+static void disk_alloc_events(struct gendisk *disk);
 static void disk_add_events(struct gendisk *disk);
 static void disk_del_events(struct gendisk *disk);
 static void disk_release_events(struct gendisk *disk);
@@ -601,6 +602,8 @@ void add_disk(struct gendisk *disk)
 	disk->major = MAJOR(devt);
 	disk->first_minor = MINOR(devt);
 
+	disk_alloc_events(disk);
+
 	/* Register BDI before referencing it from bdev */
 	bdi = &disk->queue->backing_dev_info;
 	bdi_register_dev(bdi, disk_devt(disk));
@@ -1475,9 +1478,9 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
 	intv = disk_events_poll_jiffies(disk);
 	set_timer_slack(&ev->dwork.timer, intv / 4);
 	if (check_now)
-		queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
+		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
 	else if (intv)
-		queue_delayed_work(system_nrt_wq, &ev->dwork, intv);
+		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
 out_unlock:
 	spin_unlock_irqrestore(&ev->lock, flags);
 }
@@ -1521,7 +1524,7 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
 	ev->clearing |= mask;
 	if (!ev->block) {
 		cancel_delayed_work(&ev->dwork);
-		queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
+		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
 	}
 	spin_unlock_irq(&ev->lock);
 }
@@ -1558,7 +1561,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
 
 	/* uncondtionally schedule event check and wait for it to finish */
 	disk_block_events(disk);
-	queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
+	queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
 	flush_delayed_work(&ev->dwork);
 	__disk_unblock_events(disk, false);
 
@@ -1595,7 +1598,7 @@ static void disk_events_workfn(struct work_struct *work)
 
 	intv = disk_events_poll_jiffies(disk);
 	if (!ev->block && intv)
-		queue_delayed_work(system_nrt_wq, &ev->dwork, intv);
+		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
 
 	spin_unlock_irq(&ev->lock);
 
@@ -1733,9 +1736,9 @@ module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops,
 		&disk_events_dfl_poll_msecs, 0644);
 
 /*
- * disk_{add|del|release}_events - initialize and destroy disk_events.
+ * disk_{alloc|add|del|release}_events - initialize and destroy disk_events.
  */
-static void disk_add_events(struct gendisk *disk)
+static void disk_alloc_events(struct gendisk *disk)
 {
 	struct disk_events *ev;
 
@@ -1748,16 +1751,6 @@ static void disk_add_events(struct gendisk *disk)
 		return;
 	}
 
-	if (sysfs_create_files(&disk_to_dev(disk)->kobj,
-			       disk_events_attrs) < 0) {
-		pr_warn("%s: failed to create sysfs files for events\n",
-			disk->disk_name);
-		kfree(ev);
-		return;
-	}
-
-	disk->ev = ev;
-
 	INIT_LIST_HEAD(&ev->node);
 	ev->disk = disk;
 	spin_lock_init(&ev->lock);
@@ -1766,8 +1759,21 @@ static void disk_add_events(struct gendisk *disk)
 	ev->poll_msecs = -1;
 	INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
 
+	disk->ev = ev;
+}
+
+static void disk_add_events(struct gendisk *disk)
+{
+	if (!disk->ev)
+		return;
+
+	/* FIXME: error handling */
+	if (sysfs_create_files(&disk_to_dev(disk)->kobj, disk_events_attrs) < 0)
+		pr_warn("%s: failed to create sysfs files for events\n",
+			disk->disk_name);
+
 	mutex_lock(&disk_events_mutex);
-	list_add_tail(&ev->node, &disk_events);
+	list_add_tail(&disk->ev->node, &disk_events);
 	mutex_unlock(&disk_events_mutex);
 
 	/*
diff --git a/block/partition-generic.c b/block/partition-generic.c
index d06ec1c829c2..6df5d6928a44 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -389,17 +389,11 @@ static bool disk_unlock_native_capacity(struct gendisk *disk)
 	}
 }
 
-int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
+static int drop_partitions(struct gendisk *disk, struct block_device *bdev)
 {
-	struct parsed_partitions *state = NULL;
 	struct disk_part_iter piter;
 	struct hd_struct *part;
-	int p, highest, res;
-rescan:
-	if (state && !IS_ERR(state)) {
-		kfree(state);
-		state = NULL;
-	}
+	int res;
 
 	if (bdev->bd_part_count)
 		return -EBUSY;
@@ -412,6 +406,24 @@ rescan:
 		delete_partition(disk, part->partno);
 	disk_part_iter_exit(&piter);
 
+	return 0;
+}
+
+int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
+{
+	struct parsed_partitions *state = NULL;
+	struct hd_struct *part;
+	int p, highest, res;
+rescan:
+	if (state && !IS_ERR(state)) {
+		kfree(state);
+		state = NULL;
+	}
+
+	res = drop_partitions(disk, bdev);
+	if (res)
+		return res;
+
 	if (disk->fops->revalidate_disk)
 		disk->fops->revalidate_disk(disk);
 	check_disk_size_change(disk, bdev);
@@ -515,6 +527,26 @@ rescan:
 	return 0;
 }
 
+int invalidate_partitions(struct gendisk *disk, struct block_device *bdev)
+{
+	int res;
+
+	if (!bdev->bd_invalidated)
+		return 0;
+
+	res = drop_partitions(disk, bdev);
+	if (res)
+		return res;
+
+	set_capacity(disk, 0);
+	check_disk_size_change(disk, bdev);
+	bdev->bd_invalidated = 0;
+	/* tell userspace that the media / partition table may have changed */
+	kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
+
+	return 0;
+}
+
 unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
 {
 	struct address_space *mapping = bdev->bd_inode->i_mapping;
diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c
index bd8ae788f689..e507cfbd044e 100644
--- a/block/partitions/ldm.c
+++ b/block/partitions/ldm.c
@@ -2,7 +2,7 @@
  * ldm - Support for Windows Logical Disk Manager (Dynamic Disks)
  *
  * Copyright (C) 2001,2002 Richard Russon <ldm@flatcap.org>
- * Copyright (c) 2001-2007 Anton Altaparmakov
+ * Copyright (c) 2001-2012 Anton Altaparmakov
  * Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
  *
  * Documentation is available at http://www.linux-ntfs.org/doku.php?id=downloads
@@ -1341,20 +1341,17 @@ found:
 		ldm_error("REC value (%d) exceeds NUM value (%d)", rec, f->num);
 		return false;
 	}
-
 	if (f->map & (1 << rec)) {
 		ldm_error ("Duplicate VBLK, part %d.", rec);
 		f->map &= 0x7F;		/* Mark the group as broken */
 		return false;
 	}
-
 	f->map |= (1 << rec);
-
+	if (!rec)
+		memcpy(f->data, data, VBLK_SIZE_HEAD);
 	data += VBLK_SIZE_HEAD;
 	size -= VBLK_SIZE_HEAD;
-
-	memcpy (f->data+rec*(size-VBLK_SIZE_HEAD)+VBLK_SIZE_HEAD, data, size);
-
+	memcpy(f->data + VBLK_SIZE_HEAD + rec * size, data, size);
 	return true;
 }
 