path: root/block/blk-core.c
author		Linus Torvalds <torvalds@linux-foundation.org>	2009-09-14 20:55:15 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-14 20:55:15 -0400
commit		355bbd8cb82e60a592f6cd86ce6dbe5677615cf4 (patch)
tree		23678e50ad4687f1656edc972388ee8014e7b89d /block/blk-core.c
parent		39695224bd84dc4be29abad93a0ec232a16fc519 (diff)
parent		746cd1e7e4a555ddaee53b19a46e05c9c61eaf09 (diff)
Merge branch 'for-2.6.32' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.32' of git://git.kernel.dk/linux-2.6-block: (29 commits)
  block: use blkdev_issue_discard in blk_ioctl_discard
  Make DISCARD_BARRIER and DISCARD_NOBARRIER writes instead of reads
  block: don't assume device has a request list backing in nr_requests store
  block: Optimal I/O limit wrapper
  cfq: choose a new next_req when a request is dispatched
  Seperate read and write statistics of in_flight requests
  aoe: end barrier bios with EOPNOTSUPP
  block: trace bio queueing trial only when it occurs
  block: enable rq CPU completion affinity by default
  cfq: fix the log message after dispatched a request
  block: use printk_once
  cciss: memory leak in cciss_init_one()
  splice: update mtime and atime on files
  block: make blk_iopoll_prep_sched() follow normal 0/1 return convention
  cfq-iosched: get rid of must_alloc flag
  block: use interrupts disabled version of raise_softirq_irqoff()
  block: fix comment in blk-iopoll.c
  block: adjust default budget for blk-iopoll
  block: fix long lines in block/blk-iopoll.c
  block: add blk-iopoll, a NAPI like approach for block devices
  ...
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	166
1 file changed, 134 insertions(+), 32 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index e695634882a6..8135228e4b29 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -69,7 +69,7 @@ static void drive_stat_acct(struct request *rq, int new_io)
 		part_stat_inc(cpu, part, merges[rw]);
 	else {
 		part_round_stats(cpu, part);
-		part_inc_in_flight(part);
+		part_inc_in_flight(part, rw);
 	}
 
 	part_stat_unlock();
@@ -1031,7 +1031,7 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
 
 	if (part->in_flight) {
 		__part_stat_add(cpu, part, time_in_queue,
-				part->in_flight * (now - part->stamp));
+				part_in_flight(part) * (now - part->stamp));
 		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
 	}
 	part->stamp = now;
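[Editor's note] The two hunks above pick up the "Seperate read and write statistics of in_flight requests" change from this merge: the single in_flight counter in struct hd_struct is split into one counter per direction, so callers now pass the rw index and sum the pair when a total is needed. A rough sketch of the resulting helpers as they would appear in include/linux/genhd.h (reconstructed for context, not part of this diff):

/* sketch: per-direction in-flight accounting (include/linux/genhd.h) */
static inline void part_inc_in_flight(struct hd_struct *part, int rw)
{
	part->in_flight[rw]++;		/* rw: 0 = read, 1 = write */
}

static inline void part_dec_in_flight(struct hd_struct *part, int rw)
{
	part->in_flight[rw]--;
}

static inline int part_in_flight(struct hd_struct *part)
{
	return part->in_flight[0] + part->in_flight[1];
}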
@@ -1112,31 +1112,27 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	req->cmd_type = REQ_TYPE_FS;
 
 	/*
-	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
+	 * Inherit FAILFAST from bio (for read-ahead, and explicit
+	 * FAILFAST).  FAILFAST flags are identical for req and bio.
 	 */
-	if (bio_rw_ahead(bio))
-		req->cmd_flags |= (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
-				   REQ_FAILFAST_DRIVER);
-	if (bio_failfast_dev(bio))
-		req->cmd_flags |= REQ_FAILFAST_DEV;
-	if (bio_failfast_transport(bio))
-		req->cmd_flags |= REQ_FAILFAST_TRANSPORT;
-	if (bio_failfast_driver(bio))
-		req->cmd_flags |= REQ_FAILFAST_DRIVER;
-
-	if (unlikely(bio_discard(bio))) {
+	if (bio_rw_flagged(bio, BIO_RW_AHEAD))
+		req->cmd_flags |= REQ_FAILFAST_MASK;
+	else
+		req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
+
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
 		req->cmd_flags |= REQ_DISCARD;
-		if (bio_barrier(bio))
+		if (bio_rw_flagged(bio, BIO_RW_BARRIER))
 			req->cmd_flags |= REQ_SOFTBARRIER;
 		req->q->prepare_discard_fn(req->q, req);
-	} else if (unlikely(bio_barrier(bio)))
+	} else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)))
 		req->cmd_flags |= REQ_HARDBARRIER;
 
-	if (bio_sync(bio))
+	if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
 		req->cmd_flags |= REQ_RW_SYNC;
-	if (bio_rw_meta(bio))
+	if (bio_rw_flagged(bio, BIO_RW_META))
 		req->cmd_flags |= REQ_RW_META;
-	if (bio_noidle(bio))
+	if (bio_rw_flagged(bio, BIO_RW_NOIDLE))
 		req->cmd_flags |= REQ_NOIDLE;
 
 	req->errors = 0;
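[Editor's note] The FAILFAST rework above leans on two things: bio_rw_flagged() replaces the per-flag bio_*() wrappers, and the failfast bits occupy the same positions in bio->bi_rw and rq->cmd_flags, so the whole group can be copied with a single mask. Roughly, per the bio.h/blkdev.h definitions of that era (a sketch, not part of this diff):

/* sketch: generic test of a BIO_RW_* flag (include/linux/bio.h) */
static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
{
	return (bio->bi_rw & (1 << flag)) != 0;
}

/* sketch: the failfast bits, identical for bios and requests (include/linux/blkdev.h) */
#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)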
@@ -1151,7 +1147,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	 */
 static inline bool queue_should_plug(struct request_queue *q)
 {
-	return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
+	return !(blk_queue_nonrot(q) && blk_queue_queuing(q));
 }
 
 static int __make_request(struct request_queue *q, struct bio *bio)
@@ -1160,11 +1156,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	int el_ret;
 	unsigned int bytes = bio->bi_size;
 	const unsigned short prio = bio_prio(bio);
-	const int sync = bio_sync(bio);
-	const int unplug = bio_unplug(bio);
+	const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+	const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG);
+	const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
 	int rw_flags;
 
-	if (bio_barrier(bio) && bio_has_data(bio) &&
+	if (bio_rw_flagged(bio, BIO_RW_BARRIER) && bio_has_data(bio) &&
 	    (q->next_ordered == QUEUE_ORDERED_NONE)) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
@@ -1178,7 +1175,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 	spin_lock_irq(q->queue_lock);
 
-	if (unlikely(bio_barrier(bio)) || elv_queue_empty(q))
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q))
 		goto get_rq;
 
 	el_ret = elv_merge(q, &req, bio);
@@ -1191,6 +1188,9 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 		trace_block_bio_backmerge(q, bio);
 
+		if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
+			blk_rq_set_mixed_merge(req);
+
 		req->biotail->bi_next = bio;
 		req->biotail = bio;
 		req->__data_len += bytes;
@@ -1210,6 +1210,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 		trace_block_bio_frontmerge(q, bio);
 
+		if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) {
+			blk_rq_set_mixed_merge(req);
+			req->cmd_flags &= ~REQ_FAILFAST_MASK;
+			req->cmd_flags |= ff;
+		}
+
 		bio->bi_next = req->bio;
 		req->bio = bio;
 
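[Editor's note] Both merge paths above now compare the incoming bio's failfast bits (ff) with the request's; a mismatch turns the request into a "mixed merge", after which the failfast attributes are tracked per bio rather than per request. blk_rq_set_mixed_merge() lives in block/blk-merge.c and is not part of this diff; a hedged sketch of the idea:

/*
 * Sketch of blk_rq_set_mixed_merge(): push the request's current failfast
 * bits down into every bio and mark the request, so that later code
 * (blk_rq_err_bytes(), blk_update_request()) consults the bios instead.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	for (bio = rq->bio; bio; bio = bio->bi_next)
		bio->bi_rw |= ff;
	rq->cmd_flags |= REQ_MIXED_MERGE;
}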
@@ -1457,19 +1463,20 @@ static inline void __generic_make_request(struct bio *bio)
 		if (old_sector != -1)
 			trace_block_remap(q, bio, old_dev, old_sector);
 
-		trace_block_bio_queue(q, bio);
-
 		old_sector = bio->bi_sector;
 		old_dev = bio->bi_bdev->bd_dev;
 
 		if (bio_check_eod(bio, nr_sectors))
 			goto end_io;
 
-		if (bio_discard(bio) && !q->prepare_discard_fn) {
+		if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+		    !q->prepare_discard_fn) {
 			err = -EOPNOTSUPP;
 			goto end_io;
 		}
 
+		trace_block_bio_queue(q, bio);
+
 		ret = q->make_request_fn(q, bio);
 	} while (ret);
 
@@ -1654,6 +1661,50 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
 
+/**
+ * blk_rq_err_bytes - determine number of bytes till the next failure boundary
+ * @rq: request to examine
+ *
+ * Description:
+ *     A request could be merge of IOs which require different failure
+ *     handling.  This function determines the number of bytes which
+ *     can be failed from the beginning of the request without
+ *     crossing into area which need to be retried further.
+ *
+ * Return:
+ *     The number of bytes to fail.
+ *
+ * Context:
+ *     queue_lock must be held.
+ */
+unsigned int blk_rq_err_bytes(const struct request *rq)
+{
+	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
+	unsigned int bytes = 0;
+	struct bio *bio;
+
+	if (!(rq->cmd_flags & REQ_MIXED_MERGE))
+		return blk_rq_bytes(rq);
+
+	/*
+	 * Currently the only 'mixing' which can happen is between
+	 * different fastfail types.  We can safely fail portions
+	 * which have all the failfast bits that the first one has -
+	 * the ones which are at least as eager to fail as the first
+	 * one.
+	 */
+	for (bio = rq->bio; bio; bio = bio->bi_next) {
+		if ((bio->bi_rw & ff) != ff)
+			break;
+		bytes += bio->bi_size;
+	}
+
+	/* this could lead to infinite loop */
+	BUG_ON(blk_rq_bytes(rq) && !bytes);
+	return bytes;
+}
+EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
+
 static void blk_account_io_completion(struct request *req, unsigned int bytes)
 {
 	if (blk_do_io_stat(req)) {
@@ -1687,7 +1738,7 @@ static void blk_account_io_done(struct request *req)
 		part_stat_inc(cpu, part, ios[rw]);
 		part_stat_add(cpu, part, ticks[rw], duration);
 		part_round_stats(cpu, part);
-		part_dec_in_flight(part);
+		part_dec_in_flight(part, rw);
 
 		part_stat_unlock();
 	}
@@ -1807,8 +1858,15 @@ void blk_dequeue_request(struct request *rq)
 	 * and to it is freed is accounted as io that is in progress at
 	 * the driver side.
 	 */
-	if (blk_account_rq(rq))
+	if (blk_account_rq(rq)) {
 		q->in_flight[rq_is_sync(rq)]++;
+		/*
+		 * Mark this device as supporting hardware queuing, if
+		 * we have more IOs in flight than 4.
+		 */
+		if (!blk_queue_queuing(q) && queue_in_flight(q) > 4)
+			set_bit(QUEUE_FLAG_CQ, &q->queue_flags);
+	}
 }
 
 /**
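[Editor's note] This is the other half of the queue_should_plug() change earlier in the diff: once more than four requests have been observed in flight, the queue is assumed to do hardware command queuing, and a non-rotational device with that flag set is no longer plugged. blk_queue_queuing() is presumably just a test of that flag, along these lines (sketch, not part of this diff):

/* sketch: QUEUE_FLAG_CQ marks a device observed doing hardware queuing */
#define blk_queue_queuing(q)	test_bit(QUEUE_FLAG_CQ, &(q)->queue_flags)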
1814/** 1872/**
@@ -2000,6 +2058,12 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	if (blk_fs_request(req) || blk_discard_rq(req))
 		req->__sector += total_bytes >> 9;
 
+	/* mixed attributes always follow the first bio */
+	if (req->cmd_flags & REQ_MIXED_MERGE) {
+		req->cmd_flags &= ~REQ_FAILFAST_MASK;
+		req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
+	}
+
 	/*
 	 * If total number of sectors is less than the first segment
 	 * size, something has gone terribly wrong.
@@ -2179,6 +2243,25 @@ bool blk_end_request_cur(struct request *rq, int error)
 EXPORT_SYMBOL(blk_end_request_cur);
 
 /**
+ * blk_end_request_err - Finish a request till the next failure boundary.
+ * @rq: the request to finish till the next failure boundary for
+ * @error: must be negative errno
+ *
+ * Description:
+ *     Complete @rq till the next failure boundary.
+ *
+ * Return:
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
+ */
+bool blk_end_request_err(struct request *rq, int error)
+{
+	WARN_ON(error >= 0);
+	return blk_end_request(rq, error, blk_rq_err_bytes(rq));
+}
+EXPORT_SYMBOL_GPL(blk_end_request_err);
+
+/**
  * __blk_end_request - Helper function for drivers to complete the request.
  * @rq: the request being processed
  * @error: %0 for success, < %0 for error
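[Editor's note] Together with blk_rq_err_bytes() added earlier in this diff, this lets a driver fail only the leading portion of a mixed-merge request that is at least as failfast as its first bio, leaving the rest to be retried. A hypothetical completion-path fragment (the driver name and function are made up for illustration):

/* hypothetical driver completion path using the new helper */
static void mydrv_complete_rq(struct request *rq, int error)
{
	if (error) {
		/* fail only up to the next failure boundary */
		if (blk_end_request_err(rq, error))
			return;	/* remainder still pending; driver must requeue/retry it */
	} else {
		blk_end_request_all(rq, 0);	/* complete everything on success */
	}

	/* the whole request has been completed */
}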
@@ -2237,12 +2320,31 @@ bool __blk_end_request_cur(struct request *rq, int error)
 }
 EXPORT_SYMBOL(__blk_end_request_cur);
 
+/**
+ * __blk_end_request_err - Finish a request till the next failure boundary.
+ * @rq: the request to finish till the next failure boundary for
+ * @error: must be negative errno
+ *
+ * Description:
+ *     Complete @rq till the next failure boundary.  Must be called
+ *     with queue lock held.
+ *
+ * Return:
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
+ */
+bool __blk_end_request_err(struct request *rq, int error)
+{
+	WARN_ON(error >= 0);
+	return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
+}
+EXPORT_SYMBOL_GPL(__blk_end_request_err);
+
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		     struct bio *bio)
 {
-	/* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw, and
-	   we want BIO_RW_AHEAD (bit 1) to imply REQ_FAILFAST (bit 1). */
-	rq->cmd_flags |= (bio->bi_rw & 3);
+	/* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
+	rq->cmd_flags |= bio->bi_rw & REQ_RW;
 
 	if (bio_has_data(bio)) {
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
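[Editor's note] The old mask of 3 copied both the direction bit and BIO_RW_AHEAD into the request; with failfast inheritance now handled explicitly in init_request_from_bio() above, only the R/W bit is copied via REQ_RW, which is presumably bit 0 in both flag spaces, i.e. something like:

/* sketch: the direction bit occupies bit 0 of both bio->bi_rw and rq->cmd_flags */
#define REQ_RW	(1 << __REQ_RW)	/* __REQ_RW is bit 0: clear = read, set = write */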