author		Linus Torvalds <torvalds@linux-foundation.org>	2008-12-30 20:20:05 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-12-30 20:20:05 -0500
commit		1dff81f20cd55ffa5a8ee984da70ce0b99d29606 (patch)
tree		06eb07bda250abfa8a78c3141db56862c8c7cf98 /block/blk-core.c
parent		179475a3b46f86e2d06f83e2312218ac3f0cf3a7 (diff)
parent		d3f761104b097738932afcc310fbbbbfb007ef92 (diff)
Merge branch 'for-2.6.29' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.29' of git://git.kernel.dk/linux-2.6-block: (43 commits)
bio: get rid of bio_vec clearing
bounce: don't rely on a zeroed bio_vec list
cciss: simplify parameters to deregister_disk function
cfq-iosched: fix race between exiting queue and exiting task
loop: Do not call loop_unplug for not configured loop device.
loop: Flush possible running bios when loop device is released.
alpha: remove dead BIO_VMERGE_BOUNDARY
Get rid of CONFIG_LSF
block: make blk_softirq_init() static
block: use min_not_zero in blk_queue_stack_limits
block: add one-hit cache for disk partition lookup
cfq-iosched: remove limit of dispatch depth of max 4 times quantum
nbd: tell the block layer that it is not a rotational device
block: get rid of elevator_t typedef
aio: make the lookup_ioctx() lockless
bio: add support for inlining a number of bio_vecs inside the bio
bio: allow individual slabs in the bio_set
bio: move the slab pointer inside the bio_set
bio: only mempool back the largest bio_vec slab cache
block: don't use plugging on SSD devices
...
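Most of the blk-core.c changes below come from the "block: don't use plugging on SSD devices" commit in this list, keyed off the queue's QUEUE_FLAG_NONROT flag that the nbd commit also uses. A minimal sketch of how a driver of this era advertises a non-rotational device (the function name and disk pointer are placeholders for whatever gendisk the driver owns):

	#include <linux/blkdev.h>
	#include <linux/genhd.h>

	/* Sketch: mark the queue non-rotational so the block layer
	 * skips plugging for it (see the blk-core.c hunks below). */
	static void example_mark_nonrot(struct gendisk *disk)
	{
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
	}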
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	63
1 file changed, 19 insertions(+), 44 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 561e8a1b43a4..a824e49c0d0a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -153,6 +153,9 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 			nbytes = bio->bi_size;
 		}
 
+		if (unlikely(rq->cmd_flags & REQ_QUIET))
+			set_bit(BIO_QUIET, &bio->bi_flags);
+
 		bio->bi_size -= nbytes;
 		bio->bi_sector += (nbytes >> 9);
 
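This first hunk propagates a request's REQ_QUIET flag down to each bio as BIO_QUIET, so per-bio completion handlers can suppress the error noise a submitter asked to silence. A minimal sketch of a completion callback honoring the flag (the function name is hypothetical; the bi_end_io signature is the one from this era):

	/* Sketch: stay quiet on errors when the submitter requested it. */
	static void example_end_io(struct bio *bio, int error)
	{
		if (error && !bio_flagged(bio, BIO_QUIET))
			printk(KERN_ERR "example: I/O error %d at sector %llu\n",
			       error, (unsigned long long)bio->bi_sector);
		bio_put(bio);
	}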
@@ -265,8 +268,7 @@ void __generic_unplug_device(struct request_queue *q)
 {
 	if (unlikely(blk_queue_stopped(q)))
 		return;
-
-	if (!blk_remove_plug(q))
+	if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
 		return;
 
 	q->request_fn(q);
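__generic_unplug_device() used to bail out when no plug was removed; it now also runs ->request_fn() for non-rotational queues, which are never plugged in the first place (see the __make_request() hunks further down). Plugging exists to batch requests so the elevator can merge and sort them around seeks; an SSD has no seek penalty, so immediate dispatch is the better trade. The dispatch policy this hunk encodes, pulled out as a standalone predicate purely for illustration (not a kernel function; the caller holds q->queue_lock, as blk_remove_plug() requires):

	/* Sketch: dispatch if we removed a plug, or if the queue is
	 * non-rotational and thus was never plugged to begin with. */
	static int example_should_dispatch(struct request_queue *q)
	{
		return blk_remove_plug(q) || blk_queue_nonrot(q);
	}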
@@ -404,7 +406,8 @@ EXPORT_SYMBOL(blk_stop_queue);
 void blk_sync_queue(struct request_queue *q)
 {
 	del_timer_sync(&q->unplug_timer);
-	kblockd_flush_work(&q->unplug_work);
+	del_timer_sync(&q->timeout);
+	cancel_work_sync(&q->unplug_work);
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
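blk_sync_queue() now also waits out the request-timeout timer (q->timeout) and calls cancel_work_sync() directly instead of going through the kblockd_flush_work() wrapper, which the last hunk of this diff deletes. A minimal teardown sketch, assuming a conventional driver shutdown ordering:

	/* Sketch: quiesce all async queue activity before freeing
	 * driver state that the unplug work or timeout timer touches. */
	static void example_teardown(struct request_queue *q)
	{
		blk_sync_queue(q);	/* timers and unplug work are idle */
		blk_cleanup_queue(q);	/* drop the driver's queue reference */
	}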
@@ -1135,7 +1138,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 static int __make_request(struct request_queue *q, struct bio *bio)
 {
 	struct request *req;
-	int el_ret, nr_sectors, barrier, discard, err;
+	int el_ret, nr_sectors;
 	const unsigned short prio = bio_prio(bio);
 	const int sync = bio_sync(bio);
 	int rw_flags;
@@ -1149,22 +1152,9 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	 */
 	blk_queue_bounce(q, &bio);
 
-	barrier = bio_barrier(bio);
-	if (unlikely(barrier) && bio_has_data(bio) &&
-	    (q->next_ordered == QUEUE_ORDERED_NONE)) {
-		err = -EOPNOTSUPP;
-		goto end_io;
-	}
-
-	discard = bio_discard(bio);
-	if (unlikely(discard) && !q->prepare_discard_fn) {
-		err = -EOPNOTSUPP;
-		goto end_io;
-	}
-
 	spin_lock_irq(q->queue_lock);
 
-	if (unlikely(barrier) || elv_queue_empty(q))
+	if (unlikely(bio_barrier(bio)) || elv_queue_empty(q))
 		goto get_rq;
 
 	el_ret = elv_merge(q, &req, bio);
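Here __make_request() drops its own barrier and discard validation: the discard check moves to generic_make_request() (see the later hunk), and the barrier test folds into the queue-empty branch. For context, a hedged sketch of the kind of bio these checks police, an empty barrier built roughly the way blkdev_issue_flush() builds one (device and callback are placeholders; example_end_io is the callback sketched earlier):

	/* Sketch: submit a zero-payload barrier bio to flush a device. */
	static void example_issue_barrier(struct block_device *bdev)
	{
		struct bio *bio = bio_alloc(GFP_KERNEL, 0);

		bio->bi_bdev = bdev;
		bio->bi_end_io = example_end_io;
		submit_bio(WRITE_BARRIER, bio);
	}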
@@ -1250,18 +1240,14 @@ get_rq:
 	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
	    bio_flagged(bio, BIO_CPU_AFFINE))
 		req->cpu = blk_cpu_to_group(smp_processor_id());
-	if (elv_queue_empty(q))
+	if (!blk_queue_nonrot(q) && elv_queue_empty(q))
 		blk_plug_device(q);
 	add_request(q, req);
 out:
-	if (sync)
+	if (sync || blk_queue_nonrot(q))
 		__generic_unplug_device(q);
 	spin_unlock_irq(q->queue_lock);
 	return 0;
-
-end_io:
-	bio_endio(bio, err);
-	return 0;
 }
 
 /*
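This is the companion change in __make_request(): never plug a non-rotational queue, and always unplug after queueing to one, so requests reach the driver immediately instead of waiting on the unplug timer. The sync case keeps its old behavior. A small sketch of what a driver can assume once it has set the flag (pure illustration, not a kernel API):

	/* Sketch: after queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q),
	 * every __make_request() call unplugs before returning, so
	 * dispatch latency no longer includes the unplug timer delay. */
	static void example_check(struct request_queue *q)
	{
		if (blk_queue_nonrot(q))
			pr_debug("example: queue bypasses plugging\n");
	}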
@@ -1414,15 +1400,13 @@ static inline void __generic_make_request(struct bio *bio)
 		char b[BDEVNAME_SIZE];
 
 		q = bdev_get_queue(bio->bi_bdev);
-		if (!q) {
+		if (unlikely(!q)) {
 			printk(KERN_ERR
 			       "generic_make_request: Trying to access "
 			       "nonexistent block-device %s (%Lu)\n",
 			       bdevname(bio->bi_bdev, b),
 			       (long long) bio->bi_sector);
-end_io:
-			bio_endio(bio, err);
-			break;
+			goto end_io;
 		}
 
 		if (unlikely(nr_sectors > q->max_hw_sectors)) {
@@ -1459,14 +1443,19 @@ end_io:
 
 		if (bio_check_eod(bio, nr_sectors))
 			goto end_io;
-		if ((bio_empty_barrier(bio) && !q->prepare_flush_fn) ||
-		    (bio_discard(bio) && !q->prepare_discard_fn)) {
+
+		if (bio_discard(bio) && !q->prepare_discard_fn) {
 			err = -EOPNOTSUPP;
 			goto end_io;
 		}
 
 		ret = q->make_request_fn(q, bio);
 	} while (ret);
+
+	return;
+
+end_io:
+	bio_endio(bio, err);
 }
 
 /*
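With the common end_io label now at the bottom of __generic_make_request(), every rejection (missing queue, EOD overrun, unsupported discard, and so on) funnels through a single bio_endio() call, and the submitter sees the errno in its completion callback. A sketch of the submitter side for the -EOPNOTSUPP case the discard check produces (the flag variable is a hypothetical driver-private detail):

	static int example_discards_ok = 1;	/* hypothetical driver flag */

	/* Sketch: react to an unsupported discard at completion time. */
	static void example_discard_end_io(struct bio *bio, int err)
	{
		if (err == -EOPNOTSUPP)
			example_discards_ok = 0;  /* stop issuing discards */
		bio_put(bio);
	}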
@@ -1716,14 +1705,6 @@ static int __end_that_request_first(struct request *req, int error,
 	while ((bio = req->bio) != NULL) {
 		int nbytes;
 
-		/*
-		 * For an empty barrier request, the low level driver must
-		 * store a potential error location in ->sector. We pass
-		 * that back up in ->bi_sector.
-		 */
-		if (blk_empty_barrier(req))
-			bio->bi_sector = req->sector;
-
 		if (nr_bytes >= bio->bi_size) {
 			req->bio = bio->bi_next;
 			nbytes = bio->bi_size;
@@ -2143,12 +2124,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
-void kblockd_flush_work(struct work_struct *work)
-{
-	cancel_work_sync(work);
-}
-EXPORT_SYMBOL(kblockd_flush_work);
-
 int __init blk_dev_init(void)
 {
 	kblockd_workqueue = create_workqueue("kblockd");
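The kblockd_flush_work() wrapper added nothing over plain cancel_work_sync(), so it goes away; callers such as blk_sync_queue(), updated earlier in this diff, now call cancel_work_sync() directly. Any other caller converts the same way, as sketched here:

	#include <linux/workqueue.h>

	/* Sketch: the direct replacement for the removed helper. */
	static void example_flush(struct work_struct *work)
	{
		cancel_work_sync(work);	/* waits for a running instance too */
	}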