path: root/block/blk-core.c
author		Jiri Kosina <jkosina@suse.cz>	2010-08-11 03:36:51 -0400
committer	Jiri Kosina <jkosina@suse.cz>	2010-08-11 03:36:51 -0400
commit		6396fc3b3ff3f6b942992b653a62df11dcef9bea (patch)
tree		db3c7cbe833b43c653adc99f70941431c5ff7c4e /block/blk-core.c
parent		4785879e4d340e24e54f6de2ccfc42728b912808 (diff)
parent		3d30701b58970425e1d45994d6cb82f828924fdd (diff)
Merge branch 'master' into for-next
Conflicts: fs/exofs/inode.c
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	117
1 files changed, 82 insertions, 35 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 7ac24fa71f7a..77411486b111 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -184,7 +184,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
 	       rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
 
-	if (blk_pc_request(rq)) {
+	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 		printk(KERN_INFO "  cdb: ");
 		for (bit = 0; bit < BLK_MAX_CDB; bit++)
 			printk("%02x ", rq->cmd[bit]);
@@ -608,6 +608,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 
 	q->request_fn = rfn;
 	q->prep_rq_fn = NULL;
+	q->unprep_rq_fn = NULL;
 	q->unplug_fn = generic_unplug_device;
 	q->queue_flags = QUEUE_FLAG_DEFAULT;
 	q->queue_lock = lock;
@@ -1135,30 +1136,46 @@ void blk_put_request(struct request *req)
 }
 EXPORT_SYMBOL(blk_put_request);
 
+/**
+ * blk_add_request_payload - add a payload to a request
+ * @rq: request to update
+ * @page: page backing the payload
+ * @len: length of the payload.
+ *
+ * This allows to later add a payload to an already submitted request by
+ * a block driver.  The driver needs to take care of freeing the payload
+ * itself.
+ *
+ * Note that this is a quite horrible hack and nothing but handling of
+ * discard requests should ever use it.
+ */
+void blk_add_request_payload(struct request *rq, struct page *page,
+		unsigned int len)
+{
+	struct bio *bio = rq->bio;
+
+	bio->bi_io_vec->bv_page = page;
+	bio->bi_io_vec->bv_offset = 0;
+	bio->bi_io_vec->bv_len = len;
+
+	bio->bi_size = len;
+	bio->bi_vcnt = 1;
+	bio->bi_phys_segments = 1;
+
+	rq->__data_len = rq->resid_len = len;
+	rq->nr_phys_segments = 1;
+	rq->buffer = bio_data(bio);
+}
+EXPORT_SYMBOL_GPL(blk_add_request_payload);
+
 void init_request_from_bio(struct request *req, struct bio *bio)
 {
 	req->cpu = bio->bi_comp_cpu;
 	req->cmd_type = REQ_TYPE_FS;
 
-	/*
-	 * Inherit FAILFAST from bio (for read-ahead, and explicit
-	 * FAILFAST).  FAILFAST flags are identical for req and bio.
-	 */
-	if (bio_rw_flagged(bio, BIO_RW_AHEAD))
+	req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
+	if (bio->bi_rw & REQ_RAHEAD)
 		req->cmd_flags |= REQ_FAILFAST_MASK;
-	else
-		req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
-
-	if (bio_rw_flagged(bio, BIO_RW_DISCARD))
-		req->cmd_flags |= REQ_DISCARD;
-	if (bio_rw_flagged(bio, BIO_RW_BARRIER))
-		req->cmd_flags |= REQ_HARDBARRIER;
-	if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
-		req->cmd_flags |= REQ_RW_SYNC;
-	if (bio_rw_flagged(bio, BIO_RW_META))
-		req->cmd_flags |= REQ_RW_META;
-	if (bio_rw_flagged(bio, BIO_RW_NOIDLE))
-		req->cmd_flags |= REQ_NOIDLE;
 
 	req->errors = 0;
 	req->__sector = bio->bi_sector;
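
The kerneldoc above documents blk_add_request_payload(), but no caller appears in this file. As a rough, hypothetical sketch only (the name example_prep_discard and the 512-byte payload length are assumptions, not part of this commit), a driver's prep_rq_fn might use it to attach a one-page discard payload:

#include <linux/blkdev.h>
#include <linux/gfp.h>

/*
 * Hypothetical prep_rq_fn: attach a single-page payload to a discard
 * request via blk_add_request_payload().  The driver owns the page and
 * must free it again later, as the kerneldoc above warns.
 */
static int example_prep_discard(struct request_queue *q, struct request *rq)
{
        struct page *page;

        if (!(rq->cmd_flags & REQ_DISCARD))
                return BLKPREP_OK;

        page = alloc_page(GFP_ATOMIC);
        if (!page)
                return BLKPREP_DEFER;

        /* ... fill the page with the device-specific discard descriptor ... */
        blk_add_request_payload(rq, page, 512);
        rq->cmd_flags |= REQ_DONTPREP;  /* don't prepare again after a requeue */
        return BLKPREP_OK;
}
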
@@ -1181,12 +1198,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	int el_ret;
 	unsigned int bytes = bio->bi_size;
 	const unsigned short prio = bio_prio(bio);
-	const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
-	const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG);
+	const bool sync = (bio->bi_rw & REQ_SYNC);
+	const bool unplug = (bio->bi_rw & REQ_UNPLUG);
 	const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
 	int rw_flags;
 
-	if (bio_rw_flagged(bio, BIO_RW_BARRIER) &&
+	if ((bio->bi_rw & REQ_HARDBARRIER) &&
 	    (q->next_ordered == QUEUE_ORDERED_NONE)) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
@@ -1200,7 +1217,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 	spin_lock_irq(q->queue_lock);
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q))
+	if (unlikely((bio->bi_rw & REQ_HARDBARRIER)) || elv_queue_empty(q))
 		goto get_rq;
 
 	el_ret = elv_merge(q, &req, bio);
@@ -1275,7 +1292,7 @@ get_rq:
 	 */
 	rw_flags = bio_data_dir(bio);
 	if (sync)
-		rw_flags |= REQ_RW_SYNC;
+		rw_flags |= REQ_SYNC;
 
 	/*
 	 * Grab a free request. This is might sleep but can not fail.
@@ -1464,7 +1481,7 @@ static inline void __generic_make_request(struct bio *bio)
 		goto end_io;
 	}
 
-	if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+	if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
 		     nr_sectors > queue_max_hw_sectors(q))) {
 		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
 		       bdevname(bio->bi_bdev, b),
@@ -1497,8 +1514,7 @@ static inline void __generic_make_request(struct bio *bio)
 	if (bio_check_eod(bio, nr_sectors))
 		goto end_io;
 
-	if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
-	    !blk_queue_discard(q)) {
+	if ((bio->bi_rw & REQ_DISCARD) && !blk_queue_discard(q)) {
 		err = -EOPNOTSUPP;
 		goto end_io;
 	}
@@ -1583,7 +1599,7 @@ void submit_bio(int rw, struct bio *bio)
 	 * If it's a regular read/write or a barrier with data attached,
 	 * go through the normal accounting stuff before submission.
 	 */
-	if (bio_has_data(bio) && !(rw & (1 << BIO_RW_DISCARD))) {
+	if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
 		if (rw & WRITE) {
 			count_vm_events(PGPGOUT, count);
 		} else {
@@ -1628,6 +1644,9 @@ EXPORT_SYMBOL(submit_bio);
  */
 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 {
+	if (rq->cmd_flags & REQ_DISCARD)
+		return 0;
+
 	if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
 	    blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
 		printk(KERN_ERR "%s: over max size limit.\n", __func__);
@@ -1796,7 +1815,7 @@ struct request *blk_peek_request(struct request_queue *q)
 			 * sees this request (possibly after
 			 * requeueing).  Notify IO scheduler.
 			 */
-			if (blk_sorted_rq(rq))
+			if (rq->cmd_flags & REQ_SORTED)
 				elv_activate_rq(q, rq);
 
 			/*
@@ -1984,10 +2003,11 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	 * TODO: tj: This is too subtle.  It would be better to let
 	 * low level drivers do what they see fit.
 	 */
-	if (blk_fs_request(req))
+	if (req->cmd_type == REQ_TYPE_FS)
 		req->errors = 0;
 
-	if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
+	if (error && req->cmd_type == REQ_TYPE_FS &&
+	    !(req->cmd_flags & REQ_QUIET)) {
 		printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
 		       req->rq_disk ? req->rq_disk->disk_name : "?",
 		       (unsigned long long)blk_rq_pos(req));
@@ -2074,7 +2094,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	req->buffer = bio_data(req->bio);
 
 	/* update sector only for requests with clear definition of sector */
-	if (blk_fs_request(req) || blk_discard_rq(req))
+	if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD))
 		req->__sector += total_bytes >> 9;
 
 	/* mixed attributes always follow the first bio */
@@ -2111,11 +2131,32 @@ static bool blk_update_bidi_request(struct request *rq, int error,
 	    blk_update_request(rq->next_rq, error, bidi_bytes))
 		return true;
 
-	add_disk_randomness(rq->rq_disk);
+	if (blk_queue_add_random(rq->q))
+		add_disk_randomness(rq->rq_disk);
 
 	return false;
 }
 
+/**
+ * blk_unprep_request - unprepare a request
+ * @req:	the request
+ *
+ * This function makes a request ready for complete resubmission (or
+ * completion).  It happens only after all error handling is complete,
+ * so represents the appropriate moment to deallocate any resources
+ * that were allocated to the request in the prep_rq_fn.  The queue
+ * lock is held when calling this.
+ */
+void blk_unprep_request(struct request *req)
+{
+	struct request_queue *q = req->q;
+
+	req->cmd_flags &= ~REQ_DONTPREP;
+	if (q->unprep_rq_fn)
+		q->unprep_rq_fn(q, req);
+}
+EXPORT_SYMBOL_GPL(blk_unprep_request);
+
 /*
  * queue lock must be held
  */
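
A matching counterpart, again only a sketch with hypothetical names: an unprep_rq_fn that frees the payload page attached by the prep sketch earlier. blk_finish_request() below reaches it through blk_unprep_request() whenever REQ_DONTPREP is still set; registering the pair via blk_queue_prep_rq()/blk_queue_unprep_rq() at queue setup time is assumed here, not shown by this commit.

/*
 * Hypothetical unprep_rq_fn: release the payload page that the prep
 * sketch above attached to a discard request.
 */
static void example_unprep_discard(struct request_queue *q, struct request *rq)
{
        if (rq->cmd_flags & REQ_DISCARD)
                __free_page(rq->bio->bi_io_vec->bv_page);
}

/* at queue setup time:
 *      blk_queue_prep_rq(q, example_prep_discard);
 *      blk_queue_unprep_rq(q, example_unprep_discard);
 */
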
@@ -2126,11 +2167,15 @@ static void blk_finish_request(struct request *req, int error)
 
 	BUG_ON(blk_queued_rq(req));
 
-	if (unlikely(laptop_mode) && blk_fs_request(req))
+	if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
 		laptop_io_completion(&req->q->backing_dev_info);
 
 	blk_delete_timer(req);
 
+	if (req->cmd_flags & REQ_DONTPREP)
+		blk_unprep_request(req);
+
+
 	blk_account_io_done(req);
 
 	if (req->end_io)
@@ -2363,7 +2408,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 			  struct bio *bio)
 {
 	/* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
-	rq->cmd_flags |= bio->bi_rw & REQ_RW;
+	rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
 
 	if (bio_has_data(bio)) {
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
@@ -2450,6 +2495,8 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
 {
 	dst->cpu = src->cpu;
 	dst->cmd_flags = (rq_data_dir(src) | REQ_NOMERGE);
+	if (src->cmd_flags & REQ_DISCARD)
+		dst->cmd_flags |= REQ_DISCARD;
 	dst->cmd_type = src->cmd_type;
 	dst->__sector = blk_rq_pos(src);
 	dst->__data_len = blk_rq_bytes(src);