author     Linus Torvalds <torvalds@linux-foundation.org>   2012-02-11 13:07:11 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-02-11 13:07:11 -0500
commit     3ec1e88b33a3bdd852ce8e014052acec7a9da8b5 (patch)
tree       4270f8f0de4e28f090cba6d6e4047aae939d6463
parent     8df54d622a120058ee8bec38743c9b8f091c8e58 (diff)
parent     d8c66c5d59247e25a69428aced0b79d33b9c66d6 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Says Jens:

 "Time to push off some of the pending items. I really wanted to wait
  until we had the regression nailed, but alas it's not quite there yet.
  But I'm very confident that it's "just" a missing expire on exit, so
  fix from Tejun should be fairly trivial. I'm headed out for a week on
  the slopes.

  - Killing the barrier part of mtip32xx. It doesn't really support
    barriers, and it doesn't need them (writes are fully ordered).

  - A few fixes from Dan Carpenter, preventing overflows of integer
    multiplication.

  - A fixup for loop, fixing a previous commit that didn't quite solve
    the partial read problem from Dave Young.

  - A bio integer overflow fix from Kent Overstreet.

  - Improvement/fix of the door "keep locked" part of the cdrom shared
    code from Paolo Benzini.

  - A few cfq fixes from Shaohua Li.

  - A fix for bsg sysfs warning when removing a file it did not create
    from Stanislaw Gruszka.

  - Two fixes for floppy from Vivek, preventing a crash.

  - A few block core fixes from Tejun. One killing the over-optimized
    ioc exit path, cleaning that up nicely. Two others fixing an oops on
    elevator switch, due to calling into the scheduler merge check code
    without holding the queue lock."

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: fix lockdep warning on io_context release put_io_context()
  relay: prevent integer overflow in relay_open()
  loop: zero fill bio instead of return -EIO for partial read
  bio: don't overflow in bio_get_nr_vecs()
  floppy: Fix a crash during rmmod
  floppy: Cleanup disk->queue before caling put_disk() if add_disk() was never called
  cdrom: move shared static to cdrom_device_info
  bsg: fix sysfs link remove warning
  block: don't call elevator callbacks for plug merges
  block: separate out blk_rq_merge_ok() and blk_try_merge() from elevator functions
  mtip32xx: removed the irrelevant argument of mtip_hw_submit_io() and the unused member of struct driver_data
  block: strip out locking optimization in put_io_context()
  cdrom: use copy_to_user() without the underscores
  block: fix ioc locking warning
  block: fix NULL icq_cache reference
  block,cfq: change code order
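The overflow fixes called out above (relay_open() and bio_get_nr_vecs()) both rely on the usual guard of bounds-checking a multiplication before performing it. As an illustrative sketch only — demo_alloc_table() and its names are invented here and are not code from this pull — the shape of that check is:

#include <linux/kernel.h>
#include <linux/slab.h>

/*
 * Illustrative sketch only: reject n before computing n * sizeof(...)
 * so the multiplication can never wrap, mirroring the UINT_MAX check
 * added to relay_open()/relay_create_buf() in the diff further down.
 */
static size_t *demo_alloc_table(size_t n)
{
        if (n > UINT_MAX / sizeof(size_t))      /* n * sizeof(size_t) would overflow */
                return NULL;
        return kmalloc(n * sizeof(size_t), GFP_KERNEL);
}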
-rw-r--r--  block/blk-cgroup.c                 |   2
-rw-r--r--  block/blk-core.c                   |  33
-rw-r--r--  block/blk-ioc.c                    | 111
-rw-r--r--  block/blk-merge.c                  |  37
-rw-r--r--  block/blk.h                        |   2
-rw-r--r--  block/bsg.c                        |   3
-rw-r--r--  block/cfq-iosched.c                |  24
-rw-r--r--  block/elevator.c                   |  55
-rw-r--r--  drivers/block/floppy.c             |  17
-rw-r--r--  drivers/block/loop.c               |  24
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c  |  11
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.h  |   5
-rw-r--r--  drivers/cdrom/cdrom.c              |  20
-rw-r--r--  fs/bio.c                           |  10
-rw-r--r--  fs/ioprio.c                        |   2
-rw-r--r--  include/linux/blkdev.h             |   3
-rw-r--r--  include/linux/cdrom.h              |   3
-rw-r--r--  include/linux/elevator.h           |   9
-rw-r--r--  include/linux/iocontext.h          |   5
-rw-r--r--  kernel/fork.c                      |   2
-rw-r--r--  kernel/relay.c                     |  10
21 files changed, 155 insertions, 233 deletions
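The largest functional change below is in block/blk-ioc.c: put_io_context() no longer tears down icqs inline under whatever queue_lock the caller may hold, and instead punts the final teardown to ioc->release_work. A minimal sketch of that "defer the lock-heavy cleanup to a workqueue" pattern — foo_ctx, foo_put() and foo_release_fn() are hypothetical names for illustration, not taken from this series — looks like this:

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical object with the same release problem as io_context. */
struct foo_ctx {
        atomic_t refcount;
        struct work_struct release_work;  /* INIT_WORK(&ctx->release_work, foo_release_fn) at init */
};

static void foo_release_fn(struct work_struct *work)
{
        struct foo_ctx *ctx = container_of(work, struct foo_ctx, release_work);

        /*
         * Runs later in process context, so any awkward reverse-order
         * locking can be done here without caring what the final
         * foo_put() caller was holding.
         */
        kfree(ctx);
}

static void foo_put(struct foo_ctx *ctx)
{
        /* Last reference: never tear down inline, hand off to the workqueue. */
        if (atomic_dec_and_test(&ctx->refcount))
                schedule_work(&ctx->release_work);
}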
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index fa8f26309444..75642a352a8f 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1659,7 +1659,7 @@ static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
 if (ioc) {
 ioc_cgroup_changed(ioc);
-put_io_context(ioc, NULL);
+put_io_context(ioc);
 }
 }
 }
diff --git a/block/blk-core.c b/block/blk-core.c
index e6c05a97ee2b..3a78b00edd71 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -642,7 +642,7 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
 if (rq->cmd_flags & REQ_ELVPRIV) {
 elv_put_request(q, rq);
 if (rq->elv.icq)
-put_io_context(rq->elv.icq->ioc, q);
+put_io_context(rq->elv.icq->ioc);
 }
 
 mempool_free(rq, q->rq.rq_pool);
@@ -872,13 +872,15 @@ retry:
 spin_unlock_irq(q->queue_lock);
 
 /* create icq if missing */
-if (unlikely(et->icq_cache && !icq))
+if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
 icq = ioc_create_icq(q, gfp_mask);
+if (!icq)
+goto fail_icq;
+}
 
-/* rqs are guaranteed to have icq on elv_set_request() if requested */
-if (likely(!et->icq_cache || icq))
-rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
+rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
 
+fail_icq:
 if (unlikely(!rq)) {
 /*
 * Allocation failed presumably due to memory. Undo anything
@@ -1210,7 +1212,6 @@ static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
 drive_stat_acct(req, 0);
-elv_bio_merged(q, req, bio);
 return true;
 }
 
@@ -1241,7 +1242,6 @@ static bool bio_attempt_front_merge(struct request_queue *q,
 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
 drive_stat_acct(req, 0);
-elv_bio_merged(q, req, bio);
 return true;
 }
 
@@ -1255,13 +1255,12 @@ static bool bio_attempt_front_merge(struct request_queue *q,
 * on %current's plugged list. Returns %true if merge was successful,
 * otherwise %false.
 *
-* This function is called without @q->queue_lock; however, elevator is
-* accessed iff there already are requests on the plugged list which in
-* turn guarantees validity of the elevator.
-*
-* Note that, on successful merge, elevator operation
-* elevator_bio_merged_fn() will be called without queue lock. Elevator
-* must be ready for this.
+* Plugging coalesces IOs from the same issuer for the same purpose without
+* going through @q->queue_lock. As such it's more of an issuing mechanism
+* than scheduling, and the request, while may have elvpriv data, is not
+* added on the elevator at this point. In addition, we don't have
+* reliable access to the elevator outside queue lock. Only check basic
+* merging parameters without querying the elevator.
 */
 static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
 unsigned int *request_count)
@@ -1280,10 +1279,10 @@ static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
 
 (*request_count)++;
 
-if (rq->q != q)
+if (rq->q != q || !blk_rq_merge_ok(rq, bio))
 continue;
 
-el_ret = elv_try_merge(rq, bio);
+el_ret = blk_try_merge(rq, bio);
 if (el_ret == ELEVATOR_BACK_MERGE) {
 ret = bio_attempt_back_merge(q, rq, bio);
 if (ret)
@@ -1345,12 +1344,14 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
 el_ret = elv_merge(q, &req, bio);
 if (el_ret == ELEVATOR_BACK_MERGE) {
 if (bio_attempt_back_merge(q, req, bio)) {
+elv_bio_merged(q, req, bio);
 if (!attempt_back_merge(q, req))
 elv_merged_request(q, req, el_ret);
 goto out_unlock;
 }
 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
 if (bio_attempt_front_merge(q, req, bio)) {
+elv_bio_merged(q, req, bio);
 if (!attempt_front_merge(q, req))
 elv_merged_request(q, req, el_ret);
 goto out_unlock;
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 27a06e00eaec..8b782a63c297 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -29,21 +29,6 @@ void get_io_context(struct io_context *ioc)
 }
 EXPORT_SYMBOL(get_io_context);
 
-/*
-* Releasing ioc may nest into another put_io_context() leading to nested
-* fast path release. As the ioc's can't be the same, this is okay but
-* makes lockdep whine. Keep track of nesting and use it as subclass.
-*/
-#ifdef CONFIG_LOCKDEP
-#define ioc_release_depth(q) ((q) ? (q)->ioc_release_depth : 0)
-#define ioc_release_depth_inc(q) (q)->ioc_release_depth++
-#define ioc_release_depth_dec(q) (q)->ioc_release_depth--
-#else
-#define ioc_release_depth(q) 0
-#define ioc_release_depth_inc(q) do { } while (0)
-#define ioc_release_depth_dec(q) do { } while (0)
-#endif
-
 static void icq_free_icq_rcu(struct rcu_head *head)
 {
 struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
@@ -75,11 +60,8 @@ static void ioc_exit_icq(struct io_cq *icq)
 if (rcu_dereference_raw(ioc->icq_hint) == icq)
 rcu_assign_pointer(ioc->icq_hint, NULL);
 
-if (et->ops.elevator_exit_icq_fn) {
-ioc_release_depth_inc(q);
+if (et->ops.elevator_exit_icq_fn)
 et->ops.elevator_exit_icq_fn(icq);
-ioc_release_depth_dec(q);
-}
 
 /*
 * @icq->q might have gone away by the time RCU callback runs
@@ -98,8 +80,15 @@ static void ioc_release_fn(struct work_struct *work)
 struct io_context *ioc = container_of(work, struct io_context,
 release_work);
 struct request_queue *last_q = NULL;
+unsigned long flags;
 
-spin_lock_irq(&ioc->lock);
+/*
+* Exiting icq may call into put_io_context() through elevator
+* which will trigger lockdep warning. The ioc's are guaranteed to
+* be different, use a different locking subclass here. Use
+* irqsave variant as there's no spin_lock_irq_nested().
+*/
+spin_lock_irqsave_nested(&ioc->lock, flags, 1);
 
 while (!hlist_empty(&ioc->icq_list)) {
 struct io_cq *icq = hlist_entry(ioc->icq_list.first,
@@ -121,15 +110,15 @@ static void ioc_release_fn(struct work_struct *work)
 */
 if (last_q) {
 spin_unlock(last_q->queue_lock);
-spin_unlock_irq(&ioc->lock);
+spin_unlock_irqrestore(&ioc->lock, flags);
 blk_put_queue(last_q);
 } else {
-spin_unlock_irq(&ioc->lock);
+spin_unlock_irqrestore(&ioc->lock, flags);
 }
 
 last_q = this_q;
-spin_lock_irq(this_q->queue_lock);
-spin_lock(&ioc->lock);
+spin_lock_irqsave(this_q->queue_lock, flags);
+spin_lock_nested(&ioc->lock, 1);
 continue;
 }
 ioc_exit_icq(icq);
@@ -137,10 +126,10 @@ static void ioc_release_fn(struct work_struct *work)
 
 if (last_q) {
 spin_unlock(last_q->queue_lock);
-spin_unlock_irq(&ioc->lock);
+spin_unlock_irqrestore(&ioc->lock, flags);
 blk_put_queue(last_q);
 } else {
-spin_unlock_irq(&ioc->lock);
+spin_unlock_irqrestore(&ioc->lock, flags);
 }
 
 kmem_cache_free(iocontext_cachep, ioc);
@@ -149,79 +138,29 @@ static void ioc_release_fn(struct work_struct *work)
 /**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
-* @locked_q: request_queue the caller is holding queue_lock of (hint)
 *
 * Decrement reference count of @ioc and release it if the count reaches
-* zero. If the caller is holding queue_lock of a queue, it can indicate
-* that with @locked_q. This is an optimization hint and the caller is
-* allowed to pass in %NULL even when it's holding a queue_lock.
+* zero.
 */
-void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
+void put_io_context(struct io_context *ioc)
 {
-struct request_queue *last_q = locked_q;
 unsigned long flags;
 
 if (ioc == NULL)
 return;
 
 BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
-if (locked_q)
-lockdep_assert_held(locked_q->queue_lock);
-
-if (!atomic_long_dec_and_test(&ioc->refcount))
-return;
 
 /*
-* Destroy @ioc. This is a bit messy because icq's are chained
-* from both ioc and queue, and ioc->lock nests inside queue_lock.
-* The inner ioc->lock should be held to walk our icq_list and then
-* for each icq the outer matching queue_lock should be grabbed.
-* ie. We need to do reverse-order double lock dancing.
-*
-* Another twist is that we are often called with one of the
-* matching queue_locks held as indicated by @locked_q, which
-* prevents performing double-lock dance for other queues.
-*
-* So, we do it in two stages. The fast path uses the queue_lock
-* the caller is holding and, if other queues need to be accessed,
-* uses trylock to avoid introducing locking dependency. This can
-* handle most cases, especially if @ioc was performing IO on only
-* single device.
-*
-* If trylock doesn't cut it, we defer to @ioc->release_work which
-* can do all the double-locking dancing.
+* Releasing ioc requires reverse order double locking and we may
+* already be holding a queue_lock. Do it asynchronously from wq.
 */
-spin_lock_irqsave_nested(&ioc->lock, flags,
-ioc_release_depth(locked_q));
-
-while (!hlist_empty(&ioc->icq_list)) {
-struct io_cq *icq = hlist_entry(ioc->icq_list.first,
-struct io_cq, ioc_node);
-struct request_queue *this_q = icq->q;
-
-if (this_q != last_q) {
-if (last_q && last_q != locked_q)
-spin_unlock(last_q->queue_lock);
-last_q = NULL;
-
-if (!spin_trylock(this_q->queue_lock))
-break;
-last_q = this_q;
-continue;
-}
-ioc_exit_icq(icq);
+if (atomic_long_dec_and_test(&ioc->refcount)) {
+spin_lock_irqsave(&ioc->lock, flags);
+if (!hlist_empty(&ioc->icq_list))
+schedule_work(&ioc->release_work);
+spin_unlock_irqrestore(&ioc->lock, flags);
 }
-
-if (last_q && last_q != locked_q)
-spin_unlock(last_q->queue_lock);
-
-spin_unlock_irqrestore(&ioc->lock, flags);
-
-/* if no icq is left, we're done; otherwise, kick release_work */
-if (hlist_empty(&ioc->icq_list))
-kmem_cache_free(iocontext_cachep, ioc);
-else
-schedule_work(&ioc->release_work);
 }
 EXPORT_SYMBOL(put_io_context);
 
@@ -236,7 +175,7 @@ void exit_io_context(struct task_struct *task)
 task_unlock(task);
 
 atomic_dec(&ioc->nr_tasks);
-put_io_context(ioc, NULL);
+put_io_context(ioc);
 }
 
 /**
diff --git a/block/blk-merge.c b/block/blk-merge.c
index cfcc37cb222b..160035f54882 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -471,3 +471,40 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 {
 return attempt_merge(q, rq, next);
 }
+
+bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
+{
+if (!rq_mergeable(rq))
+return false;
+
+/* don't merge file system requests and discard requests */
+if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
+return false;
+
+/* don't merge discard requests and secure discard requests */
+if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
+return false;
+
+/* different data direction or already started, don't merge */
+if (bio_data_dir(bio) != rq_data_dir(rq))
+return false;
+
+/* must be same device and not a special request */
+if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
+return false;
+
+/* only merge integrity protected bio into ditto rq */
+if (bio_integrity(bio) != blk_integrity_rq(rq))
+return false;
+
+return true;
+}
+
+int blk_try_merge(struct request *rq, struct bio *bio)
+{
+if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
+return ELEVATOR_BACK_MERGE;
+else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
+return ELEVATOR_FRONT_MERGE;
+return ELEVATOR_NO_MERGE;
+}
diff --git a/block/blk.h b/block/blk.h
index 7efd772336de..9c12f80882b0 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -137,6 +137,8 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 struct request *next);
 void blk_recalc_rq_segments(struct request *rq);
 void blk_rq_set_mixed_merge(struct request *rq);
+bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
+int blk_try_merge(struct request *rq, struct bio *bio);
 
 void blk_queue_congestion_threshold(struct request_queue *q);
 
diff --git a/block/bsg.c b/block/bsg.c
index 4cf703fd98bb..ff64ae3bacee 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -983,7 +983,8 @@ void bsg_unregister_queue(struct request_queue *q)
 
 mutex_lock(&bsg_mutex);
 idr_remove(&bsg_minor_idr, bcd->minor);
-sysfs_remove_link(&q->kobj, "bsg");
+if (q->kobj.sd)
+sysfs_remove_link(&q->kobj, "bsg");
 device_unregister(bcd->class_dev);
 bcd->class_dev = NULL;
 kref_put(&bcd->ref, bsg_kref_release_function);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ee55019066a1..d0ba50533668 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1699,18 +1699,11 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 
 /*
 * Lookup the cfqq that this bio will be queued with and allow
-* merge only if rq is queued there. This function can be called
-* from plug merge without queue_lock. In such cases, ioc of @rq
-* and %current are guaranteed to be equal. Avoid lookup which
-* requires queue_lock by using @rq's cic.
+* merge only if rq is queued there.
 */
-if (current->io_context == RQ_CIC(rq)->icq.ioc) {
-cic = RQ_CIC(rq);
-} else {
-cic = cfq_cic_lookup(cfqd, current->io_context);
-if (!cic)
-return false;
-}
+cic = cfq_cic_lookup(cfqd, current->io_context);
+if (!cic)
+return false;
 
 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
 return cfqq == RQ_CFQQ(rq);
@@ -1794,7 +1787,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 cfqd->active_queue = NULL;
 
 if (cfqd->active_cic) {
-put_io_context(cfqd->active_cic->icq.ioc, cfqd->queue);
+put_io_context(cfqd->active_cic->icq.ioc);
 cfqd->active_cic = NULL;
 }
 }
@@ -3117,17 +3110,18 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 */
 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
+enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
+
 cfq_log_cfqq(cfqd, cfqq, "preempt");
+cfq_slice_expired(cfqd, 1);
 
 /*
 * workload type is changed, don't save slice, otherwise preempt
 * doesn't happen
 */
-if (cfqq_type(cfqd->active_queue) != cfqq_type(cfqq))
+if (old_type != cfqq_type(cfqq))
 cfqq->cfqg->saved_workload_slice = 0;
 
-cfq_slice_expired(cfqd, 1);
-
 /*
 * Put the new queue at the front of the of the current list,
 * so we know that it will be selected next.
diff --git a/block/elevator.c b/block/elevator.c
index 91e18f8af9be..f016855a46b0 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -70,39 +70,9 @@ static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
 /*
 * can we safely merge with this request?
 */
-int elv_rq_merge_ok(struct request *rq, struct bio *bio)
+bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
 {
-if (!rq_mergeable(rq))
-return 0;
-
-/*
-* Don't merge file system requests and discard requests
-*/
-if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
-return 0;
-
-/*
-* Don't merge discard requests and secure discard requests
-*/
-if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
-return 0;
-
-/*
-* different data direction or already started, don't merge
-*/
-if (bio_data_dir(bio) != rq_data_dir(rq))
-return 0;
-
-/*
-* must be same device and not a special request
-*/
-if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
-return 0;
-
-/*
-* only merge integrity protected bio into ditto rq
-*/
-if (bio_integrity(bio) != blk_integrity_rq(rq))
+if (!blk_rq_merge_ok(rq, bio))
 return 0;
 
 if (!elv_iosched_allow_merge(rq, bio))
@@ -112,23 +82,6 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 }
 EXPORT_SYMBOL(elv_rq_merge_ok);
 
-int elv_try_merge(struct request *__rq, struct bio *bio)
-{
-int ret = ELEVATOR_NO_MERGE;
-
-/*
-* we can merge and sequence is ok, check if it's possible
-*/
-if (elv_rq_merge_ok(__rq, bio)) {
-if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
-ret = ELEVATOR_BACK_MERGE;
-else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
-ret = ELEVATOR_FRONT_MERGE;
-}
-
-return ret;
-}
-
 static struct elevator_type *elevator_find(const char *name)
 {
 struct elevator_type *e;
@@ -478,8 +431,8 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 /*
 * First try one-hit cache.
 */
-if (q->last_merge) {
-ret = elv_try_merge(q->last_merge, bio);
+if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
+ret = blk_try_merge(q->last_merge, bio);
 if (ret != ELEVATOR_NO_MERGE) {
 *req = q->last_merge;
 return ret;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 510fb10ec45a..9baf11e86362 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4368,8 +4368,14 @@ out_unreg_blkdev:
 out_put_disk:
 while (dr--) {
 del_timer_sync(&motor_off_timer[dr]);
-if (disks[dr]->queue)
+if (disks[dr]->queue) {
 blk_cleanup_queue(disks[dr]->queue);
+/*
+* put_disk() is not paired with add_disk() and
+* will put queue reference one extra time. fix it.
+*/
+disks[dr]->queue = NULL;
+}
 put_disk(disks[dr]);
 }
 return err;
@@ -4579,6 +4585,15 @@ static void __exit floppy_module_exit(void)
 platform_device_unregister(&floppy_device[drive]);
 }
 blk_cleanup_queue(disks[drive]->queue);
+
+/*
+* These disks have not called add_disk(). Don't put down
+* queue reference in put_disk().
+*/
+if (!(allowed_drive_mask & (1 << drive)) ||
+fdc_state[FDC(drive)].version == FDC_NONE)
+disks[drive]->queue = NULL;
+
 put_disk(disks[drive]);
 }
 
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f00257782fcc..cd504353b278 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -356,14 +356,14 @@ lo_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd)
 return __splice_from_pipe(pipe, sd, lo_splice_actor);
 }
 
-static int
+static ssize_t
 do_lo_receive(struct loop_device *lo,
 struct bio_vec *bvec, int bsize, loff_t pos)
 {
 struct lo_read_data cookie;
 struct splice_desc sd;
 struct file *file;
-long retval;
+ssize_t retval;
 
 cookie.lo = lo;
 cookie.page = bvec->bv_page;
@@ -379,26 +379,28 @@ do_lo_receive(struct loop_device *lo,
 file = lo->lo_backing_file;
 retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor);
 
-if (retval < 0)
-return retval;
-if (retval != bvec->bv_len)
-return -EIO;
-return 0;
+return retval;
 }
 
 static int
 lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
 {
 struct bio_vec *bvec;
-int i, ret = 0;
+ssize_t s;
+int i;
 
 bio_for_each_segment(bvec, bio, i) {
-ret = do_lo_receive(lo, bvec, bsize, pos);
-if (ret < 0)
+s = do_lo_receive(lo, bvec, bsize, pos);
+if (s < 0)
+return s;
+
+if (s != bvec->bv_len) {
+zero_fill_bio(bio);
 break;
+}
 pos += bvec->bv_len;
 }
-return ret;
+return 0;
 }
 
 static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index b74eab70c3d0..8eb81c96608f 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2068,8 +2068,6 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
 * when the read completes.
 * @data Callback data passed to the callback function
 * when the read completes.
-* @barrier If non-zero, this command must be completed before
-* issuing any other commands.
 * @dir Direction (read or write)
 *
 * return value
@@ -2077,7 +2075,7 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
 */
 static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
 int nsect, int nents, int tag, void *callback,
-void *data, int barrier, int dir)
+void *data, int dir)
 {
 struct host_to_dev_fis *fis;
 struct mtip_port *port = dd->port;
@@ -2108,8 +2106,6 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
 *((unsigned int *) &fis->lba_low) = (start & 0xFFFFFF);
 *((unsigned int *) &fis->lba_low_ex) = ((start >> 24) & 0xFFFFFF);
 fis->device = 1 << 6;
-if (barrier)
-fis->device |= FUA_BIT;
 fis->features = nsect & 0xFF;
 fis->features_ex = (nsect >> 8) & 0xFF;
 fis->sect_count = ((tag << 3) | (tag >> 5));
@@ -3087,7 +3083,6 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
 tag,
 bio_endio,
 bio,
-bio->bi_rw & REQ_FUA,
 bio_data_dir(bio));
 } else
 bio_io_error(bio);
@@ -3187,6 +3182,10 @@ skip_create_disk:
 blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
 blk_queue_physical_block_size(dd->queue, 4096);
 blk_queue_io_min(dd->queue, 4096);
+/*
+* write back cache is not supported in the device. FUA depends on
+* write back cache support, hence setting flush support to zero.
+*/
 blk_queue_flush(dd->queue, 0);
 
 /* Set the capacity of the device in 512 byte sectors. */
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 723d7c4946dc..e0554a8f2233 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -104,9 +104,6 @@
 /* BAR number used to access the HBA registers. */
 #define MTIP_ABAR 5
 
-/* Forced Unit Access Bit */
-#define FUA_BIT 0x80
-
 #ifdef DEBUG
 #define dbg_printk(format, arg...) \
 printk(pr_fmt(format), ##arg);
@@ -415,8 +412,6 @@ struct driver_data {
 
 atomic_t resumeflag; /* Atomic variable to track suspend/resume */
 
-atomic_t eh_active; /* Flag for error handling tracking */
-
 struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
 };
 
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 55eaf474d32c..d620b4495745 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -286,8 +286,6 @@
 
 /* used to tell the module to turn on full debugging messages */
 static bool debug;
-/* used to keep tray locked at all times */
-static int keeplocked;
 /* default compatibility mode */
 static bool autoclose=1;
 static bool autoeject;
@@ -1204,7 +1202,7 @@ void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode)
 cdinfo(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n", cdi->name);
 cdrom_dvd_rw_close_write(cdi);
 
-if ((cdo->capability & CDC_LOCK) && !keeplocked) {
+if ((cdo->capability & CDC_LOCK) && !cdi->keeplocked) {
 cdinfo(CD_CLOSE, "Unlocking door!\n");
 cdo->lock_door(cdi, 0);
 }
@@ -1371,7 +1369,7 @@ static int cdrom_select_disc(struct cdrom_device_info *cdi, int slot)
 curslot = info->hdr.curslot;
 kfree(info);
 
-if (cdi->use_count > 1 || keeplocked) {
+if (cdi->use_count > 1 || cdi->keeplocked) {
 if (slot == CDSL_CURRENT) {
 return curslot;
 } else {
@@ -2119,11 +2117,6 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 if (!nr)
 return -ENOMEM;
 
-if (!access_ok(VERIFY_WRITE, ubuf, nframes * CD_FRAMESIZE_RAW)) {
-ret = -EFAULT;
-goto out;
-}
-
 cgc.data_direction = CGC_DATA_READ;
 while (nframes > 0) {
 if (nr > nframes)
@@ -2132,7 +2125,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 ret = cdrom_read_block(cdi, &cgc, lba, nr, 1, CD_FRAMESIZE_RAW);
 if (ret)
 break;
-if (__copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
+if (copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
 ret = -EFAULT;
 break;
 }
@@ -2140,7 +2133,6 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 nframes -= nr;
 lba += nr;
 }
-out:
 kfree(cgc.buffer);
 return ret;
 }
@@ -2295,7 +2287,7 @@ static int cdrom_ioctl_eject(struct cdrom_device_info *cdi)
 
 if (!CDROM_CAN(CDC_OPEN_TRAY))
 return -ENOSYS;
-if (cdi->use_count != 1 || keeplocked)
+if (cdi->use_count != 1 || cdi->keeplocked)
 return -EBUSY;
 if (CDROM_CAN(CDC_LOCK)) {
 int ret = cdi->ops->lock_door(cdi, 0);
@@ -2322,7 +2314,7 @@ static int cdrom_ioctl_eject_sw(struct cdrom_device_info *cdi,
 
 if (!CDROM_CAN(CDC_OPEN_TRAY))
 return -ENOSYS;
-if (keeplocked)
+if (cdi->keeplocked)
 return -EBUSY;
 
 cdi->options &= ~(CDO_AUTO_CLOSE | CDO_AUTO_EJECT);
@@ -2453,7 +2445,7 @@ static int cdrom_ioctl_lock_door(struct cdrom_device_info *cdi,
 if (!CDROM_CAN(CDC_LOCK))
 return -EDRIVE_CANT_DO_THIS;
 
-keeplocked = arg ? 1 : 0;
+cdi->keeplocked = arg ? 1 : 0;
 
 /*
 * Don't unlock the door on multiple opens by default, but allow
diff --git a/fs/bio.c b/fs/bio.c
index b1fe82cf88cf..b980ecde026a 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -505,13 +505,9 @@ EXPORT_SYMBOL(bio_clone);
 int bio_get_nr_vecs(struct block_device *bdev)
 {
 struct request_queue *q = bdev_get_queue(bdev);
-int nr_pages;
-
-nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-if (nr_pages > queue_max_segments(q))
-nr_pages = queue_max_segments(q);
-
-return nr_pages;
+return min_t(unsigned,
+queue_max_segments(q),
+queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1);
 }
 EXPORT_SYMBOL(bio_get_nr_vecs);
 
diff --git a/fs/ioprio.c b/fs/ioprio.c
index f84b380d65e5..0f1b9515213b 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -51,7 +51,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
 ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
 if (ioc) {
 ioc_ioprio_changed(ioc, ioprio);
-put_io_context(ioc, NULL);
+put_io_context(ioc);
 }
 
 return err;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 6c6a1f008065..606cf339bb56 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -399,9 +399,6 @@ struct request_queue {
 /* Throttle data */
 struct throtl_data *td;
 #endif
-#ifdef CONFIG_LOCKDEP
-int ioc_release_depth;
-#endif
 };
 
 #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index 35eae4b67503..7c48029dffe6 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -952,7 +952,8 @@ struct cdrom_device_info {
 char name[20]; /* name of the device type */
 /* per-device flags */
 __u8 sanyo_slot : 2; /* Sanyo 3 CD changer support */
-__u8 reserved : 6; /* not used yet */
+__u8 keeplocked : 1; /* CDROM_LOCKDOOR status */
+__u8 reserved : 5; /* not used yet */
 int cdda_method; /* see flags */
 __u8 last_sense;
 __u8 media_written; /* dirty flag, DVD+RW bookkeeping */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index c24f3d7fbf1e..7d4e0356f329 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -42,12 +42,6 @@ struct elevator_ops
 elevator_merged_fn *elevator_merged_fn;
 elevator_merge_req_fn *elevator_merge_req_fn;
 elevator_allow_merge_fn *elevator_allow_merge_fn;
-
-/*
-* Used for both plugged list and elevator merging and in the
-* former case called without queue_lock. Read comment on top of
-* attempt_plug_merge() for details.
-*/
 elevator_bio_merged_fn *elevator_bio_merged_fn;
 
 elevator_dispatch_fn *elevator_dispatch_fn;
@@ -122,7 +116,6 @@ extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
 extern void elv_add_request(struct request_queue *, struct request *, int);
 extern void __elv_add_request(struct request_queue *, struct request *, int);
 extern int elv_merge(struct request_queue *, struct request **, struct bio *);
-extern int elv_try_merge(struct request *, struct bio *);
 extern void elv_merge_requests(struct request_queue *, struct request *,
 struct request *);
 extern void elv_merged_request(struct request_queue *, struct request *, int);
@@ -155,7 +148,7 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
 extern int elevator_init(struct request_queue *, char *);
 extern void elevator_exit(struct elevator_queue *);
 extern int elevator_change(struct request_queue *, const char *);
-extern int elv_rq_merge_ok(struct request *, struct bio *);
+extern bool elv_rq_merge_ok(struct request *, struct bio *);
 
 /*
 * Helper functions.
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 7e1371c4bccf..119773eebe31 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -133,7 +133,7 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
 
 struct task_struct;
 #ifdef CONFIG_BLOCK
-void put_io_context(struct io_context *ioc, struct request_queue *locked_q);
+void put_io_context(struct io_context *ioc);
 void exit_io_context(struct task_struct *task);
 struct io_context *get_task_io_context(struct task_struct *task,
 gfp_t gfp_flags, int node);
@@ -141,8 +141,7 @@ void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
 void ioc_cgroup_changed(struct io_context *ioc);
 #else
 struct io_context;
-static inline void put_io_context(struct io_context *ioc,
-struct request_queue *locked_q) { }
+static inline void put_io_context(struct io_context *ioc) { }
 static inline void exit_io_context(struct task_struct *task) { }
 #endif
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 1b2ef3c23ae4..b77fd559c78e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -910,7 +910,7 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
 return -ENOMEM;
 
 new_ioc->ioprio = ioc->ioprio;
-put_io_context(new_ioc, NULL);
+put_io_context(new_ioc);
 }
 #endif
 return 0;
diff --git a/kernel/relay.c b/kernel/relay.c
index 4335e1d7ee2d..ab56a1764d4d 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -164,10 +164,14 @@ depopulate:
 */
 static struct rchan_buf *relay_create_buf(struct rchan *chan)
 {
-struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
-if (!buf)
+struct rchan_buf *buf;
+
+if (chan->n_subbufs > UINT_MAX / sizeof(size_t *))
 return NULL;
 
+buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
+if (!buf)
+return NULL;
 buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL);
 if (!buf->padding)
 goto free_buf;
@@ -574,6 +578,8 @@ struct rchan *relay_open(const char *base_filename,
 
 if (!(subbuf_size && n_subbufs))
 return NULL;
+if (subbuf_size > UINT_MAX / n_subbufs)
+return NULL;
 
 chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
 if (!chan)