author    Jiri Kosina <jkosina@suse.cz>  2010-08-11 03:36:51 -0400
committer Jiri Kosina <jkosina@suse.cz>  2010-08-11 03:36:51 -0400
commit    6396fc3b3ff3f6b942992b653a62df11dcef9bea (patch)
tree      db3c7cbe833b43c653adc99f70941431c5ff7c4e /block
parent    4785879e4d340e24e54f6de2ccfc42728b912808 (diff)
parent    3d30701b58970425e1d45994d6cb82f828924fdd (diff)
Merge branch 'master' into for-next
Conflicts:
	fs/exofs/inode.c
Diffstat (limited to 'block')
-rw-r--r--  block/blk-barrier.c  |  35
-rw-r--r--  block/blk-core.c     | 117
-rw-r--r--  block/blk-exec.c     |   2
-rw-r--r--  block/blk-lib.c      |  56
-rw-r--r--  block/blk-map.c      |   2
-rw-r--r--  block/blk-merge.c    |   9
-rw-r--r--  block/blk-settings.c |  17
-rw-r--r--  block/blk-sysfs.c    |  82
-rw-r--r--  block/blk.h          |   6
-rw-r--r--  block/cfq-iosched.c  |  21
-rw-r--r--  block/compat_ioctl.c |  56
-rw-r--r--  block/elevator.c     |  19
-rw-r--r--  block/ioctl.c        |  21
13 files changed, 214 insertions(+), 229 deletions(-)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 0d710c9d403b..f0faefca032f 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -13,7 +13,6 @@
  * blk_queue_ordered - does this queue support ordered writes
  * @q: the request queue
  * @ordered: one of QUEUE_ORDERED_*
- * @prepare_flush_fn: rq setup helper for cache flush ordered writes
  *
  * Description:
  *   For journalled file systems, doing ordered writes on a commit
@@ -22,15 +21,8 @@
  *   feature should call this function and indicate so.
  *
  **/
-int blk_queue_ordered(struct request_queue *q, unsigned ordered,
-		      prepare_flush_fn *prepare_flush_fn)
+int blk_queue_ordered(struct request_queue *q, unsigned ordered)
 {
-	if (!prepare_flush_fn && (ordered & (QUEUE_ORDERED_DO_PREFLUSH |
-					     QUEUE_ORDERED_DO_POSTFLUSH))) {
-		printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
-		return -EINVAL;
-	}
-
 	if (ordered != QUEUE_ORDERED_NONE &&
 	    ordered != QUEUE_ORDERED_DRAIN &&
 	    ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
@@ -44,7 +36,6 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 
 	q->ordered = ordered;
 	q->next_ordered = ordered;
-	q->prepare_flush_fn = prepare_flush_fn;
 
 	return 0;
 }
@@ -79,7 +70,7 @@ unsigned blk_ordered_req_seq(struct request *rq)
 	 *
 	 * http://thread.gmane.org/gmane.linux.kernel/537473
 	 */
-	if (!blk_fs_request(rq))
+	if (rq->cmd_type != REQ_TYPE_FS)
 		return QUEUE_ORDSEQ_DRAIN;
 
 	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
@@ -143,10 +134,10 @@ static void queue_flush(struct request_queue *q, unsigned which)
 	}
 
 	blk_rq_init(q, rq);
-	rq->cmd_flags = REQ_HARDBARRIER;
-	rq->rq_disk = q->bar_rq.rq_disk;
+	rq->cmd_type = REQ_TYPE_FS;
+	rq->cmd_flags = REQ_HARDBARRIER | REQ_FLUSH;
+	rq->rq_disk = q->orig_bar_rq->rq_disk;
 	rq->end_io = end_io;
-	q->prepare_flush_fn(q, rq);
 
 	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
@@ -203,7 +194,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 	/* initialize proxy request and queue it */
 	blk_rq_init(q, rq);
 	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
-		rq->cmd_flags |= REQ_RW;
+		rq->cmd_flags |= REQ_WRITE;
 	if (q->ordered & QUEUE_ORDERED_DO_FUA)
 		rq->cmd_flags |= REQ_FUA;
 	init_request_from_bio(rq, q->orig_bar_rq->bio);
@@ -236,7 +227,8 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 bool blk_do_ordered(struct request_queue *q, struct request **rqp)
 {
 	struct request *rq = *rqp;
-	const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
+	const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
+			       (rq->cmd_flags & REQ_HARDBARRIER);
 
 	if (!q->ordseq) {
 		if (!is_barrier)
@@ -261,7 +253,7 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
 	 */
 
 	/* Special requests are not subject to ordering rules. */
-	if (!blk_fs_request(rq) &&
+	if (rq->cmd_type != REQ_TYPE_FS &&
 	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
 		return true;
 
@@ -319,6 +311,15 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 	if (!q)
 		return -ENXIO;
 
+	/*
+	 * some block devices may not have their queue correctly set up here
+	 * (e.g. loop device without a backing file) and so issuing a flush
+	 * here will panic. Ensure there is a request function before issuing
+	 * the barrier.
+	 */
+	if (!q->make_request_fn)
+		return -ENXIO;
+
 	bio = bio_alloc(gfp_mask, 0);
 	bio->bi_end_io = bio_end_empty_barrier;
 	bio->bi_bdev = bdev;
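
[Editor's note] With prepare_flush_fn gone, queue_flush() above builds the flush request itself and tags it REQ_FLUSH, so a driver only has to react to that flag in its request function. A minimal sketch of the driver side, assuming hypothetical helpers mydev_issue_cache_flush()/mydev_issue_rw() and a driver that previously passed a prepare_flush_fn:

#include <linux/blkdev.h>

/* hypothetical low-level helpers, not part of this patch */
extern void mydev_issue_cache_flush(struct request *rq);
extern void mydev_issue_rw(struct request *rq);

static void mydev_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (rq->cmd_flags & REQ_FLUSH) {
			/* flush requests are now flagged by the block core */
			mydev_issue_cache_flush(rq);
			continue;
		}
		mydev_issue_rw(rq);
	}
}

static int mydev_init_ordered(struct request_queue *q)
{
	/* previously: blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, mydev_prepare_flush); */
	return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
}
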
diff --git a/block/blk-core.c b/block/blk-core.c
index 7ac24fa71f7a..77411486b111 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -184,7 +184,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 	printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n",
 	       rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
 
-	if (blk_pc_request(rq)) {
+	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 		printk(KERN_INFO " cdb: ");
 		for (bit = 0; bit < BLK_MAX_CDB; bit++)
 			printk("%02x ", rq->cmd[bit]);
@@ -608,6 +608,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 
 	q->request_fn = rfn;
 	q->prep_rq_fn = NULL;
+	q->unprep_rq_fn = NULL;
 	q->unplug_fn = generic_unplug_device;
 	q->queue_flags = QUEUE_FLAG_DEFAULT;
 	q->queue_lock = lock;
@@ -1135,30 +1136,46 @@ void blk_put_request(struct request *req)
 }
 EXPORT_SYMBOL(blk_put_request);
 
+/**
+ * blk_add_request_payload - add a payload to a request
+ * @rq: request to update
+ * @page: page backing the payload
+ * @len: length of the payload.
+ *
+ * This allows to later add a payload to an already submitted request by
+ * a block driver.  The driver needs to take care of freeing the payload
+ * itself.
+ *
+ * Note that this is a quite horrible hack and nothing but handling of
+ * discard requests should ever use it.
+ */
+void blk_add_request_payload(struct request *rq, struct page *page,
+		unsigned int len)
+{
+	struct bio *bio = rq->bio;
+
+	bio->bi_io_vec->bv_page = page;
+	bio->bi_io_vec->bv_offset = 0;
+	bio->bi_io_vec->bv_len = len;
+
+	bio->bi_size = len;
+	bio->bi_vcnt = 1;
+	bio->bi_phys_segments = 1;
+
+	rq->__data_len = rq->resid_len = len;
+	rq->nr_phys_segments = 1;
+	rq->buffer = bio_data(bio);
+}
+EXPORT_SYMBOL_GPL(blk_add_request_payload);
+
 void init_request_from_bio(struct request *req, struct bio *bio)
 {
 	req->cpu = bio->bi_comp_cpu;
 	req->cmd_type = REQ_TYPE_FS;
 
-	/*
-	 * Inherit FAILFAST from bio (for read-ahead, and explicit
-	 * FAILFAST).  FAILFAST flags are identical for req and bio.
-	 */
-	if (bio_rw_flagged(bio, BIO_RW_AHEAD))
+	req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
+	if (bio->bi_rw & REQ_RAHEAD)
 		req->cmd_flags |= REQ_FAILFAST_MASK;
-	else
-		req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
-
-	if (bio_rw_flagged(bio, BIO_RW_DISCARD))
-		req->cmd_flags |= REQ_DISCARD;
-	if (bio_rw_flagged(bio, BIO_RW_BARRIER))
-		req->cmd_flags |= REQ_HARDBARRIER;
-	if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
-		req->cmd_flags |= REQ_RW_SYNC;
-	if (bio_rw_flagged(bio, BIO_RW_META))
-		req->cmd_flags |= REQ_RW_META;
-	if (bio_rw_flagged(bio, BIO_RW_NOIDLE))
-		req->cmd_flags |= REQ_NOIDLE;
 
 	req->errors = 0;
 	req->__sector = bio->bi_sector;
@@ -1181,12 +1198,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	int el_ret;
 	unsigned int bytes = bio->bi_size;
 	const unsigned short prio = bio_prio(bio);
-	const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
-	const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG);
+	const bool sync = (bio->bi_rw & REQ_SYNC);
+	const bool unplug = (bio->bi_rw & REQ_UNPLUG);
 	const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
 	int rw_flags;
 
-	if (bio_rw_flagged(bio, BIO_RW_BARRIER) &&
+	if ((bio->bi_rw & REQ_HARDBARRIER) &&
 	    (q->next_ordered == QUEUE_ORDERED_NONE)) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
@@ -1200,7 +1217,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 	spin_lock_irq(q->queue_lock);
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q))
+	if (unlikely((bio->bi_rw & REQ_HARDBARRIER)) || elv_queue_empty(q))
 		goto get_rq;
 
 	el_ret = elv_merge(q, &req, bio);
@@ -1275,7 +1292,7 @@ get_rq:
 	 */
 	rw_flags = bio_data_dir(bio);
 	if (sync)
-		rw_flags |= REQ_RW_SYNC;
+		rw_flags |= REQ_SYNC;
 
 	/*
 	 * Grab a free request. This is might sleep but can not fail.
@@ -1464,7 +1481,7 @@ static inline void __generic_make_request(struct bio *bio)
 			goto end_io;
 		}
 
-		if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+		if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
 			     nr_sectors > queue_max_hw_sectors(q))) {
 			printk(KERN_ERR "bio too big device %s (%u > %u)\n",
 			       bdevname(bio->bi_bdev, b),
@@ -1497,8 +1514,7 @@ static inline void __generic_make_request(struct bio *bio)
 		if (bio_check_eod(bio, nr_sectors))
 			goto end_io;
 
-		if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
-		    !blk_queue_discard(q)) {
+		if ((bio->bi_rw & REQ_DISCARD) && !blk_queue_discard(q)) {
 			err = -EOPNOTSUPP;
 			goto end_io;
 		}
@@ -1583,7 +1599,7 @@ void submit_bio(int rw, struct bio *bio)
 	 * If it's a regular read/write or a barrier with data attached,
 	 * go through the normal accounting stuff before submission.
 	 */
-	if (bio_has_data(bio) && !(rw & (1 << BIO_RW_DISCARD))) {
+	if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
 		if (rw & WRITE) {
 			count_vm_events(PGPGOUT, count);
 		} else {
@@ -1628,6 +1644,9 @@ EXPORT_SYMBOL(submit_bio);
  */
 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 {
+	if (rq->cmd_flags & REQ_DISCARD)
+		return 0;
+
 	if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
 	    blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
 		printk(KERN_ERR "%s: over max size limit.\n", __func__);
@@ -1796,7 +1815,7 @@ struct request *blk_peek_request(struct request_queue *q)
 			 * sees this request (possibly after
 			 * requeueing).  Notify IO scheduler.
 			 */
-			if (blk_sorted_rq(rq))
+			if (rq->cmd_flags & REQ_SORTED)
 				elv_activate_rq(q, rq);
 
 			/*
@@ -1984,10 +2003,11 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	 * TODO: tj: This is too subtle.  It would be better to let
 	 * low level drivers do what they see fit.
 	 */
-	if (blk_fs_request(req))
+	if (req->cmd_type == REQ_TYPE_FS)
 		req->errors = 0;
 
-	if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
+	if (error && req->cmd_type == REQ_TYPE_FS &&
+	    !(req->cmd_flags & REQ_QUIET)) {
 		printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
 				req->rq_disk ? req->rq_disk->disk_name : "?",
 				(unsigned long long)blk_rq_pos(req));
@@ -2074,7 +2094,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	req->buffer = bio_data(req->bio);
 
 	/* update sector only for requests with clear definition of sector */
-	if (blk_fs_request(req) || blk_discard_rq(req))
+	if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD))
 		req->__sector += total_bytes >> 9;
 
 	/* mixed attributes always follow the first bio */
@@ -2111,11 +2131,32 @@ static bool blk_update_bidi_request(struct request *rq, int error,
 	    blk_update_request(rq->next_rq, error, bidi_bytes))
 		return true;
 
-	add_disk_randomness(rq->rq_disk);
+	if (blk_queue_add_random(rq->q))
+		add_disk_randomness(rq->rq_disk);
 
 	return false;
 }
 
+/**
+ * blk_unprep_request - unprepare a request
+ * @req: the request
+ *
+ * This function makes a request ready for complete resubmission (or
+ * completion).  It happens only after all error handling is complete,
+ * so represents the appropriate moment to deallocate any resources
+ * that were allocated to the request in the prep_rq_fn.  The queue
+ * lock is held when calling this.
+ */
+void blk_unprep_request(struct request *req)
+{
+	struct request_queue *q = req->q;
+
+	req->cmd_flags &= ~REQ_DONTPREP;
+	if (q->unprep_rq_fn)
+		q->unprep_rq_fn(q, req);
+}
+EXPORT_SYMBOL_GPL(blk_unprep_request);
+
 /*
  * queue lock must be held
  */
@@ -2126,11 +2167,15 @@ static void blk_finish_request(struct request *req, int error)
 
 	BUG_ON(blk_queued_rq(req));
 
-	if (unlikely(laptop_mode) && blk_fs_request(req))
+	if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
 		laptop_io_completion(&req->q->backing_dev_info);
 
 	blk_delete_timer(req);
 
+	if (req->cmd_flags & REQ_DONTPREP)
+		blk_unprep_request(req);
+
+
 	blk_account_io_done(req);
 
 	if (req->end_io)
@@ -2363,7 +2408,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		     struct bio *bio)
 {
 	/* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
-	rq->cmd_flags |= bio->bi_rw & REQ_RW;
+	rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
 
 	if (bio_has_data(bio)) {
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
@@ -2450,6 +2495,8 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
 {
 	dst->cpu = src->cpu;
 	dst->cmd_flags = (rq_data_dir(src) | REQ_NOMERGE);
+	if (src->cmd_flags & REQ_DISCARD)
+		dst->cmd_flags |= REQ_DISCARD;
 	dst->cmd_type = src->cmd_type;
 	dst->__sector = blk_rq_pos(src);
 	dst->__data_len = blk_rq_bytes(src);
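
[Editor's note] blk_add_request_payload() above lets a driver attach a payload page to a request it did not build itself, and is explicitly meant only for discard handling. A hedged sketch of how a prep_rq_fn might use it; the mydrv_* name and the one-block payload size are assumptions, and the driver remains responsible for freeing the page once the request completes (for example from an unprep callback, see the blk-settings.c change below):

#include <linux/blkdev.h>
#include <linux/gfp.h>

static int mydrv_prep_rq(struct request_queue *q, struct request *rq)
{
	if (rq->cmd_flags & REQ_DISCARD) {
		/* hardware wants a one-block payload describing the range */
		struct page *page = alloc_page(GFP_ATOMIC | __GFP_ZERO);

		if (!page)
			return BLKPREP_DEFER;
		blk_add_request_payload(rq, page, queue_logical_block_size(q));
		rq->cmd_flags |= REQ_DONTPREP;	/* so an unprep hook can free it */
	}
	return BLKPREP_OK;
}
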
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 49557e91f0da..e1672f14840e 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -57,7 +57,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	__elv_add_request(q, rq, where, 1);
 	__generic_unplug_device(q);
 	/* the queue is stopped so it won't be plugged+unplugged */
-	if (blk_pm_resume_request(rq))
+	if (rq->cmd_type == REQ_TYPE_PM_RESUME)
 		q->request_fn(q);
 	spin_unlock_irq(q->queue_lock);
 }
diff --git a/block/blk-lib.c b/block/blk-lib.c
index d0216b9f22d4..c1fc55a83ba1 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -19,7 +19,6 @@ static void blkdev_discard_end_io(struct bio *bio, int err)
 
 	if (bio->bi_private)
 		complete(bio->bi_private);
-	__free_page(bio_page(bio));
 
 	bio_put(bio);
 }
@@ -42,8 +41,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	struct request_queue *q = bdev_get_queue(bdev);
 	int type = flags & BLKDEV_IFL_BARRIER ?
 		DISCARD_BARRIER : DISCARD_NOBARRIER;
+	unsigned int max_discard_sectors;
 	struct bio *bio;
-	struct page *page;
 	int ret = 0;
 
 	if (!q)
@@ -52,36 +51,30 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	if (!blk_queue_discard(q))
 		return -EOPNOTSUPP;
 
-	while (nr_sects && !ret) {
-		unsigned int sector_size = q->limits.logical_block_size;
-		unsigned int max_discard_sectors =
-			min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+	/*
+	 * Ensure that max_discard_sectors is of the proper
+	 * granularity
+	 */
+	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+	if (q->limits.discard_granularity) {
+		unsigned int disc_sects = q->limits.discard_granularity >> 9;
 
+		max_discard_sectors &= ~(disc_sects - 1);
+	}
+
+	while (nr_sects && !ret) {
 		bio = bio_alloc(gfp_mask, 1);
-		if (!bio)
-			goto out;
+		if (!bio) {
+			ret = -ENOMEM;
+			break;
+		}
+
 		bio->bi_sector = sector;
 		bio->bi_end_io = blkdev_discard_end_io;
 		bio->bi_bdev = bdev;
 		if (flags & BLKDEV_IFL_WAIT)
 			bio->bi_private = &wait;
 
-		/*
-		 * Add a zeroed one-sector payload as that's what
-		 * our current implementations need.  If we'll ever need
-		 * more the interface will need revisiting.
-		 */
-		page = alloc_page(gfp_mask | __GFP_ZERO);
-		if (!page)
-			goto out_free_bio;
-		if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
-			goto out_free_page;
-
-		/*
-		 * And override the bio size - the way discard works we
-		 * touch many more blocks on disk than the actual payload
-		 * length.
-		 */
 		if (nr_sects > max_discard_sectors) {
 			bio->bi_size = max_discard_sectors << 9;
 			nr_sects -= max_discard_sectors;
@@ -103,13 +96,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 			ret = -EIO;
 		bio_put(bio);
 	}
+
 	return ret;
-out_free_page:
-	__free_page(page);
-out_free_bio:
-	bio_put(bio);
-out:
-	return -ENOMEM;
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
 
@@ -157,7 +145,7 @@ static void bio_batch_end_io(struct bio *bio, int err)
 int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 			sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
 {
-	int ret = 0;
+	int ret;
 	struct bio *bio;
 	struct bio_batch bb;
 	unsigned int sz, issued = 0;
@@ -175,11 +163,14 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 		return ret;
 	}
 submit:
+	ret = 0;
 	while (nr_sects != 0) {
 		bio = bio_alloc(gfp_mask,
 				min(nr_sects, (sector_t)BIO_MAX_PAGES));
-		if (!bio)
+		if (!bio) {
+			ret = -ENOMEM;
 			break;
+		}
 
 		bio->bi_sector = sector;
 		bio->bi_bdev = bdev;
@@ -198,6 +189,7 @@ submit:
 			if (ret < (sz << 9))
 				break;
 		}
+		ret = 0;
 		issued++;
 		submit_bio(WRITE, bio);
 	}
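
[Editor's note] The granularity handling added to blkdev_issue_discard() rounds max_discard_sectors down with a bit mask, which works because the discard granularity (expressed in sectors) is expected to be a power of two. A small stand-alone illustration of the arithmetic; the example granularity value is not taken from the patch:

#include <assert.h>
#include <limits.h>

int main(void)
{
	unsigned int max_discard_sectors = UINT_MAX >> 9;	/* as in the patch */
	unsigned int discard_granularity = 512 * 1024;		/* bytes, example */
	unsigned int disc_sects = discard_granularity >> 9;	/* 1024 sectors */

	max_discard_sectors &= ~(disc_sects - 1);		/* round down */
	assert(max_discard_sectors % disc_sects == 0);		/* 8387584 here */
	return 0;
}
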
diff --git a/block/blk-map.c b/block/blk-map.c
index 9083cf0180cc..c65d7593f7f1 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -307,7 +307,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		return PTR_ERR(bio);
 
 	if (rq_data_dir(rq) == WRITE)
-		bio->bi_rw |= (1 << BIO_RW);
+		bio->bi_rw |= (1 << REQ_WRITE);
 
 	if (do_copy)
 		rq->cmd_flags |= REQ_COPY_USER;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 5e7dc9973458..3b0cd4249671 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -12,7 +12,6 @@
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 					     struct bio *bio)
 {
-	unsigned int phys_size;
 	struct bio_vec *bv, *bvprv = NULL;
 	int cluster, i, high, highprv = 1;
 	unsigned int seg_size, nr_phys_segs;
@@ -24,7 +23,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	fbio = bio;
 	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 	seg_size = 0;
-	phys_size = nr_phys_segs = 0;
+	nr_phys_segs = 0;
 	for_each_bio(bio) {
 		bio_for_each_segment(bv, bio, i) {
 			/*
@@ -180,7 +179,7 @@ new_segment:
 	}
 
 	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
-		if (rq->cmd_flags & REQ_RW)
+		if (rq->cmd_flags & REQ_WRITE)
 			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
 
 		sg->page_link &= ~0x02;
@@ -226,7 +225,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 {
 	unsigned short max_sectors;
 
-	if (unlikely(blk_pc_request(req)))
+	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
 		max_sectors = queue_max_hw_sectors(q);
 	else
 		max_sectors = queue_max_sectors(q);
@@ -250,7 +249,7 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 {
 	unsigned short max_sectors;
 
-	if (unlikely(blk_pc_request(req)))
+	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
 		max_sectors = queue_max_hw_sectors(q);
 	else
 		max_sectors = queue_max_sectors(q);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index f5ed5a1187ba..a234f4bf1d6f 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -37,6 +37,23 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
 EXPORT_SYMBOL(blk_queue_prep_rq);
 
 /**
+ * blk_queue_unprep_rq - set an unprepare_request function for queue
+ * @q: queue
+ * @ufn: unprepare_request function
+ *
+ * It's possible for a queue to register an unprepare_request callback
+ * which is invoked before the request is finally completed. The goal
+ * of the function is to deallocate any data that was allocated in the
+ * prepare_request callback.
+ *
+ */
+void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
+{
+	q->unprep_rq_fn = ufn;
+}
+EXPORT_SYMBOL(blk_queue_unprep_rq);
+
+/**
  * blk_queue_merge_bvec - set a merge_bvec function for queue
  * @q: queue
  * @mbfn: merge_bvec_fn
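
[Editor's note] blk_queue_unprep_rq() is the counterpart of blk_queue_prep_rq(): whatever the prep callback allocates, the unprep callback can free when the block core calls blk_unprep_request() on a REQ_DONTPREP request at completion time. A hedged sketch of a driver wiring both up; the mydrv_* names and the per-request allocation are hypothetical:

#include <linux/blkdev.h>
#include <linux/slab.h>

static int mydrv_prep_rq(struct request_queue *q, struct request *rq)
{
	rq->special = kzalloc(64, GFP_ATOMIC);	/* hypothetical per-request state */
	if (!rq->special)
		return BLKPREP_DEFER;
	rq->cmd_flags |= REQ_DONTPREP;		/* prep once; unprep on completion */
	return BLKPREP_OK;
}

static void mydrv_unprep_rq(struct request_queue *q, struct request *rq)
{
	/* reached via blk_unprep_request() from blk_finish_request() */
	kfree(rq->special);
	rq->special = NULL;
}

static void mydrv_init_queue(struct request_queue *q)
{
	blk_queue_prep_rq(q, mydrv_prep_rq);
	blk_queue_unprep_rq(q, mydrv_unprep_rq);
}
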
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 306759bbdf1b..001ab18078f5 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -180,26 +180,36 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
 	return queue_var_show(max_hw_sectors_kb, (page));
 }
 
-static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(!blk_queue_nonrot(q), page);
+#define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \
+static ssize_t \
+queue_show_##name(struct request_queue *q, char *page) \
+{ \
+	int bit; \
+	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
+	return queue_var_show(neg ? !bit : bit, page); \
+} \
+static ssize_t \
+queue_store_##name(struct request_queue *q, const char *page, size_t count) \
+{ \
+	unsigned long val; \
+	ssize_t ret; \
+	ret = queue_var_store(&val, page, count); \
+	if (neg) \
+		val = !val; \
+	\
+	spin_lock_irq(q->queue_lock); \
+	if (val) \
+		queue_flag_set(QUEUE_FLAG_##flag, q); \
+	else \
+		queue_flag_clear(QUEUE_FLAG_##flag, q); \
+	spin_unlock_irq(q->queue_lock); \
+	return ret; \
 }
 
-static ssize_t queue_nonrot_store(struct request_queue *q, const char *page,
-				  size_t count)
-{
-	unsigned long nm;
-	ssize_t ret = queue_var_store(&nm, page, count);
-
-	spin_lock_irq(q->queue_lock);
-	if (nm)
-		queue_flag_clear(QUEUE_FLAG_NONROT, q);
-	else
-		queue_flag_set(QUEUE_FLAG_NONROT, q);
-	spin_unlock_irq(q->queue_lock);
-
-	return ret;
-}
+QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
+QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
+QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
+#undef QUEUE_SYSFS_BIT_FNS
 
 static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
 {
@@ -250,27 +260,6 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
 	return ret;
 }
 
-static ssize_t queue_iostats_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(blk_queue_io_stat(q), page);
-}
-
-static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
-				   size_t count)
-{
-	unsigned long stats;
-	ssize_t ret = queue_var_store(&stats, page, count);
-
-	spin_lock_irq(q->queue_lock);
-	if (stats)
-		queue_flag_set(QUEUE_FLAG_IO_STAT, q);
-	else
-		queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
-	spin_unlock_irq(q->queue_lock);
-
-	return ret;
-}
-
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_requests_show,
@@ -352,8 +341,8 @@ static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
 
 static struct queue_sysfs_entry queue_nonrot_entry = {
 	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
-	.show = queue_nonrot_show,
-	.store = queue_nonrot_store,
+	.show = queue_show_nonrot,
+	.store = queue_store_nonrot,
 };
 
 static struct queue_sysfs_entry queue_nomerges_entry = {
@@ -370,8 +359,14 @@ static struct queue_sysfs_entry queue_rq_affinity_entry = {
 
 static struct queue_sysfs_entry queue_iostats_entry = {
 	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
-	.show = queue_iostats_show,
-	.store = queue_iostats_store,
+	.show = queue_show_iostats,
+	.store = queue_store_iostats,
+};
+
+static struct queue_sysfs_entry queue_random_entry = {
+	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_show_random,
+	.store = queue_store_random,
 };
 
 static struct attribute *default_attrs[] = {
@@ -394,6 +389,7 @@ static struct attribute *default_attrs[] = {
 	&queue_nomerges_entry.attr,
 	&queue_rq_affinity_entry.attr,
 	&queue_iostats_entry.attr,
+	&queue_random_entry.attr,
 	NULL,
 };
 
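
[Editor's note] For readability, this is roughly what QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0) generates for the new "add_random" attribute (hand-expanded, with the neg == 0 branches folded away; the real code comes from the preprocessor expanding the macro added above):

static ssize_t queue_show_random(struct request_queue *q, char *page)
{
	int bit = test_bit(QUEUE_FLAG_ADD_RANDOM, &q->queue_flags);

	return queue_var_show(bit, page);
}

static ssize_t queue_store_random(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned long val;
	ssize_t ret = queue_var_store(&val, page, count);

	spin_lock_irq(q->queue_lock);
	if (val)
		queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
	else
		queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
	spin_unlock_irq(q->queue_lock);
	return ret;
}
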
diff --git a/block/blk.h b/block/blk.h
index 5ee3d7e72feb..6e7dc87141e4 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -161,8 +161,10 @@ static inline int blk_cpu_to_group(int cpu)
  */
 static inline int blk_do_io_stat(struct request *rq)
 {
-	return rq->rq_disk && blk_rq_io_stat(rq) &&
-		(blk_fs_request(rq) || blk_discard_rq(rq));
+	return rq->rq_disk &&
+	       (rq->cmd_flags & REQ_IO_STAT) &&
+	       (rq->cmd_type == REQ_TYPE_FS ||
+		(rq->cmd_flags & REQ_DISCARD));
 }
 
 #endif
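
[Editor's note] blk_do_io_stat() above is one instance of the pattern used throughout this merge: the old blk_fs_request()/blk_discard_rq() wrappers are open-coded as direct cmd_type/cmd_flags tests. Purely as a reading aid (these helpers are not part of the patch and the names are invented):

static inline bool rq_is_fs(struct request *rq)
{
	return rq->cmd_type == REQ_TYPE_FS;		/* was blk_fs_request(rq) */
}

static inline bool rq_is_discard(struct request *rq)
{
	return (rq->cmd_flags & REQ_DISCARD) != 0;	/* was blk_discard_rq(rq) */
}
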
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 7982b830db58..eb4086f7dfef 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -458,7 +458,7 @@ static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
  */
 static inline bool cfq_bio_sync(struct bio *bio)
 {
-	return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
+	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
 }
 
 /*
@@ -646,9 +646,10 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,
 		return rq1;
 	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
 		return rq2;
-	if (rq_is_meta(rq1) && !rq_is_meta(rq2))
+	if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
 		return rq1;
-	else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
+	else if ((rq2->cmd_flags & REQ_META) &&
+		 !(rq1->cmd_flags & REQ_META))
 		return rq2;
 
 	s1 = blk_rq_pos(rq1);
@@ -1484,7 +1485,7 @@ static void cfq_remove_request(struct request *rq)
 	cfqq->cfqd->rq_queued--;
 	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
 					rq_data_dir(rq), rq_is_sync(rq));
-	if (rq_is_meta(rq)) {
+	if (rq->cmd_flags & REQ_META) {
 		WARN_ON(!cfqq->meta_pending);
 		cfqq->meta_pending--;
 	}
@@ -3176,7 +3177,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	 * So both queues are sync. Let the new request get disk time if
 	 * it's a metadata request and the current queue is doing regular IO.
 	 */
-	if (rq_is_meta(rq) && !cfqq->meta_pending)
+	if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending)
 		return true;
 
 	/*
@@ -3230,7 +3231,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	struct cfq_io_context *cic = RQ_CIC(rq);
 
 	cfqd->rq_queued++;
-	if (rq_is_meta(rq))
+	if (rq->cmd_flags & REQ_META)
 		cfqq->meta_pending++;
 
 	cfq_update_io_thinktime(cfqd, cic);
@@ -3365,7 +3366,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	unsigned long now;
 
 	now = jiffies;
-	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", !!rq_noidle(rq));
+	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
+		     !!(rq->cmd_flags & REQ_NOIDLE));
 
 	cfq_update_hw_tag(cfqd);
 
@@ -3419,11 +3421,12 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 			cfq_slice_expired(cfqd, 1);
 		else if (sync && cfqq_empty &&
 			 !cfq_close_cooperator(cfqd, cfqq)) {
-			cfqd->noidle_tree_requires_idle |= !rq_noidle(rq);
+			cfqd->noidle_tree_requires_idle |=
+				!(rq->cmd_flags & REQ_NOIDLE);
 			/*
 			 * Idling is enabled for SYNC_WORKLOAD.
 			 * SYNC_NOIDLE_WORKLOAD idles at the end of the tree
-			 * only if we processed at least one !rq_noidle request
+			 * only if we processed at least one !REQ_NOIDLE request
 			 */
 			if (cfqd->serving_type == SYNC_WORKLOAD
 			    || cfqd->noidle_tree_requires_idle
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index f26051f44681..d53085637731 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -535,56 +535,6 @@ out:
 	return err;
 }
 
-struct compat_blk_user_trace_setup {
-	char name[32];
-	u16 act_mask;
-	u32 buf_size;
-	u32 buf_nr;
-	compat_u64 start_lba;
-	compat_u64 end_lba;
-	u32 pid;
-};
-#define BLKTRACESETUP32 _IOWR(0x12, 115, struct compat_blk_user_trace_setup)
-
-static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
-{
-	struct blk_user_trace_setup buts;
-	struct compat_blk_user_trace_setup cbuts;
-	struct request_queue *q;
-	char b[BDEVNAME_SIZE];
-	int ret;
-
-	q = bdev_get_queue(bdev);
-	if (!q)
-		return -ENXIO;
-
-	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
-		return -EFAULT;
-
-	bdevname(bdev, b);
-
-	buts = (struct blk_user_trace_setup) {
-		.act_mask = cbuts.act_mask,
-		.buf_size = cbuts.buf_size,
-		.buf_nr = cbuts.buf_nr,
-		.start_lba = cbuts.start_lba,
-		.end_lba = cbuts.end_lba,
-		.pid = cbuts.pid,
-	};
-	memcpy(&buts.name, &cbuts.name, 32);
-
-	mutex_lock(&bdev->bd_mutex);
-	ret = do_blk_trace_setup(q, b, bdev->bd_dev, bdev, &buts);
-	mutex_unlock(&bdev->bd_mutex);
-	if (ret)
-		return ret;
-
-	if (copy_to_user(arg, &buts.name, 32))
-		return -EFAULT;
-
-	return 0;
-}
-
 static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
 			unsigned cmd, unsigned long arg)
 {
@@ -802,16 +752,10 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 		return compat_put_u64(arg, bdev->bd_inode->i_size);
 
 	case BLKTRACESETUP32:
-		lock_kernel();
-		ret = compat_blk_trace_setup(bdev, compat_ptr(arg));
-		unlock_kernel();
-		return ret;
 	case BLKTRACESTART: /* compatible */
 	case BLKTRACESTOP: /* compatible */
 	case BLKTRACETEARDOWN: /* compatible */
-		lock_kernel();
 		ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
-		unlock_kernel();
 		return ret;
 	default:
 		if (disk->fops->compat_ioctl)
diff --git a/block/elevator.c b/block/elevator.c
index 923a9139106c..816a7c8d6394 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -79,8 +79,7 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 	/*
 	 * Don't merge file system requests and discard requests
 	 */
-	if (bio_rw_flagged(bio, BIO_RW_DISCARD) !=
-	    bio_rw_flagged(rq->bio, BIO_RW_DISCARD))
+	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
 		return 0;
 
 	/*
@@ -428,7 +427,8 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 	list_for_each_prev(entry, &q->queue_head) {
 		struct request *pos = list_entry_rq(entry);
 
-		if (blk_discard_rq(rq) != blk_discard_rq(pos))
+		if ((rq->cmd_flags & REQ_DISCARD) !=
+		    (pos->cmd_flags & REQ_DISCARD))
 			break;
 		if (rq_data_dir(rq) != rq_data_dir(pos))
 			break;
@@ -558,7 +558,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
 	 */
 	if (blk_account_rq(rq)) {
 		q->in_flight[rq_is_sync(rq)]--;
-		if (blk_sorted_rq(rq))
+		if (rq->cmd_flags & REQ_SORTED)
 			elv_deactivate_rq(q, rq);
 	}
 
@@ -644,7 +644,8 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		break;
 
 	case ELEVATOR_INSERT_SORT:
-		BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
+		BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
+		       !(rq->cmd_flags & REQ_DISCARD));
 		rq->cmd_flags |= REQ_SORTED;
 		q->nr_sorted++;
 		if (rq_mergeable(rq)) {
@@ -716,7 +717,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 		/*
 		 * toggle ordered color
 		 */
-		if (blk_barrier_rq(rq))
+		if (rq->cmd_flags & REQ_HARDBARRIER)
 			q->ordcolor ^= 1;
 
 		/*
@@ -729,7 +730,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 		 * this request is scheduling boundary, update
 		 * end_sector
 		 */
-		if (blk_fs_request(rq) || blk_discard_rq(rq)) {
+		if (rq->cmd_type == REQ_TYPE_FS ||
+		    (rq->cmd_flags & REQ_DISCARD)) {
 			q->end_sector = rq_end_sector(rq);
 			q->boundary_rq = rq;
 		}
@@ -843,7 +845,8 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 	 */
 	if (blk_account_rq(rq)) {
 		q->in_flight[rq_is_sync(rq)]--;
-		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
+		if ((rq->cmd_flags & REQ_SORTED) &&
+		    e->ops->elevator_completed_req_fn)
 			e->ops->elevator_completed_req_fn(q, rq);
 	}
 
diff --git a/block/ioctl.c b/block/ioctl.c
index e8eb679f2f9b..09fd7f1ef23a 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -163,18 +163,10 @@ int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
 			unsigned cmd, unsigned long arg)
 {
 	struct gendisk *disk = bdev->bd_disk;
-	int ret;
 
 	if (disk->fops->ioctl)
 		return disk->fops->ioctl(bdev, mode, cmd, arg);
 
-	if (disk->fops->locked_ioctl) {
-		lock_kernel();
-		ret = disk->fops->locked_ioctl(bdev, mode, cmd, arg);
-		unlock_kernel();
-		return ret;
-	}
-
 	return -ENOTTY;
 }
 /*
@@ -185,8 +177,7 @@ int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
 EXPORT_SYMBOL_GPL(__blkdev_driver_ioctl);
 
 /*
- * always keep this in sync with compat_blkdev_ioctl() and
- * compat_blkdev_locked_ioctl()
+ * always keep this in sync with compat_blkdev_ioctl()
  */
 int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 			unsigned long arg)
@@ -206,10 +197,8 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 		if (ret != -EINVAL && ret != -ENOTTY)
 			return ret;
 
-		lock_kernel();
 		fsync_bdev(bdev);
 		invalidate_bdev(bdev);
-		unlock_kernel();
 		return 0;
 
 	case BLKROSET:
@@ -221,9 +210,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 			return -EACCES;
 		if (get_user(n, (int __user *)(arg)))
 			return -EFAULT;
-		lock_kernel();
 		set_device_ro(bdev, n);
-		unlock_kernel();
 		return 0;
 
 	case BLKDISCARD: {
@@ -309,14 +296,10 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 		bd_release(bdev);
 		return ret;
 	case BLKPG:
-		lock_kernel();
 		ret = blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg);
-		unlock_kernel();
 		break;
 	case BLKRRPART:
-		lock_kernel();
 		ret = blkdev_reread_part(bdev);
-		unlock_kernel();
 		break;
 	case BLKGETSIZE:
 		size = bdev->bd_inode->i_size;
@@ -329,9 +312,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 	case BLKTRACESTOP:
 	case BLKTRACESETUP:
 	case BLKTRACETEARDOWN:
-		lock_kernel();
 		ret = blk_trace_ioctl(bdev, cmd, (char __user *) arg);
-		unlock_kernel();
 		break;
 	default:
 		ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);