commit     92d15c2ccbb3e31a3fc71ad28fdb55e1319383c0 (patch)
tree       8d83c0dc3c6b935d8367e331872f242b742f0a8a /block/ll_rw_blk.c
parent     f20bf6125605acbbc7eb8c9420d7221c91aa83eb (diff)
parent     644bd2f048972d75eb1979b1fdca257d528ce687 (diff)
author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-16 13:09:16 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-16 13:09:16 -0400
Merge branch 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block: (63 commits)
  Fix memory leak in dm-crypt
  SPARC64: sg chaining support
  SPARC: sg chaining support
  PPC: sg chaining support
  PS3: sg chaining support
  IA64: sg chaining support
  x86-64: enable sg chaining
  x86-64: update pci-gart iommu to sg helpers
  x86-64: update nommu to sg helpers
  x86-64: update calgary iommu to sg helpers
  swiotlb: sg chaining support
  i386: enable sg chaining
  i386 dma_map_sg: convert to using sg helpers
  mmc: need to zero sglist on init
  Panic in blk_rq_map_sg() from CCISS driver
  remove sglist_len
  remove blk_queue_max_phys_segments in libata
  revert sg segment size ifdefs
  Fixup u14-34f ENABLE_SG_CHAINING
  qla1280: enable use_sg_chaining option
  ...
Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--  block/ll_rw_blk.c  311
1 file changed, 224 insertions(+), 87 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index a83823fcd74f..9eabac95fbe0 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -30,6 +30,7 @@
 #include <linux/cpu.h>
 #include <linux/blktrace_api.h>
 #include <linux/fault-inject.h>
+#include <linux/scatterlist.h>
 
 /*
  * for max sense size
@@ -304,23 +305,6 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 
 EXPORT_SYMBOL(blk_queue_ordered);
 
-/**
- * blk_queue_issue_flush_fn - set function for issuing a flush
- * @q:     the request queue
- * @iff:   the function to be called issuing the flush
- *
- * Description:
- *   If a driver supports issuing a flush command, the support is notified
- *   to the block layer by defining it through this call.
- *
- **/
-void blk_queue_issue_flush_fn(struct request_queue *q, issue_flush_fn *iff)
-{
-	q->issue_flush_fn = iff;
-}
-
-EXPORT_SYMBOL(blk_queue_issue_flush_fn);
-
 /*
  * Cache flushing for ordered writes handling
  */
@@ -377,10 +361,12 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 	/*
 	 * Okay, sequence complete.
 	 */
-	rq = q->orig_bar_rq;
-	uptodate = q->orderr ? q->orderr : 1;
+	uptodate = 1;
+	if (q->orderr)
+		uptodate = q->orderr;
 
 	q->ordseq = 0;
+	rq = q->orig_bar_rq;
 
 	end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
 	end_that_request_last(rq, uptodate);
@@ -445,7 +431,8 @@ static inline struct request *start_ordered(struct request_queue *q,
 	rq_init(q, rq);
 	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
 		rq->cmd_flags |= REQ_RW;
-	rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
+	if (q->ordered & QUEUE_ORDERED_FUA)
+		rq->cmd_flags |= REQ_FUA;
 	rq->elevator_private = NULL;
 	rq->elevator_private2 = NULL;
 	init_request_from_bio(rq, q->orig_bar_rq->bio);
@@ -455,9 +442,12 @@ static inline struct request *start_ordered(struct request_queue *q,
 	 * Queue ordered sequence. As we stack them at the head, we
 	 * need to queue in reverse order. Note that we rely on that
 	 * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
-	 * request gets inbetween ordered sequence.
+	 * request gets inbetween ordered sequence. If this request is
+	 * an empty barrier, we don't need to do a postflush ever since
+	 * there will be no data written between the pre and post flush.
+	 * Hence a single flush will suffice.
 	 */
-	if (q->ordered & QUEUE_ORDERED_POSTFLUSH)
+	if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
 		queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
 	else
 		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
@@ -481,7 +471,7 @@ static inline struct request *start_ordered(struct request_queue *q,
 int blk_do_ordered(struct request_queue *q, struct request **rqp)
 {
 	struct request *rq = *rqp;
-	int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
+	const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
 
 	if (!q->ordseq) {
 		if (!is_barrier)
@@ -1329,9 +1319,10 @@ static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
  * must make sure sg can hold rq->nr_phys_segments entries
  */
 int blk_rq_map_sg(struct request_queue *q, struct request *rq,
-		  struct scatterlist *sg)
+		  struct scatterlist *sglist)
 {
 	struct bio_vec *bvec, *bvprv;
+	struct scatterlist *next_sg, *sg;
 	struct req_iterator iter;
 	int nsegs, cluster;
 
@@ -1342,11 +1333,12 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	 * for each bio in rq
 	 */
 	bvprv = NULL;
+	sg = next_sg = &sglist[0];
 	rq_for_each_segment(bvec, rq, iter) {
 		int nbytes = bvec->bv_len;
 
 		if (bvprv && cluster) {
-			if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
+			if (sg->length + nbytes > q->max_segment_size)
 				goto new_segment;
 
 			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
@@ -1354,14 +1346,15 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
 				goto new_segment;
 
-			sg[nsegs - 1].length += nbytes;
+			sg->length += nbytes;
 		} else {
 new_segment:
-			memset(&sg[nsegs],0,sizeof(struct scatterlist));
-			sg[nsegs].page = bvec->bv_page;
-			sg[nsegs].length = nbytes;
-			sg[nsegs].offset = bvec->bv_offset;
+			sg = next_sg;
+			next_sg = sg_next(sg);
 
+			sg->page = bvec->bv_page;
+			sg->length = nbytes;
+			sg->offset = bvec->bv_offset;
 			nsegs++;
 		}
 		bvprv = bvec;
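
With sg chaining, the scatterlist filled in by blk_rq_map_sg() is no longer guaranteed to be one flat array, so drivers have to step through it with sg_next() rather than plain pointer arithmetic. A minimal sketch of the driver side, assuming a hypothetical setup_dma_descriptor() stand-in for the hardware-specific DMA programming:

static void example_map_request(struct request_queue *q, struct request *rq,
				struct scatterlist *sglist)
{
	struct scatterlist *sg = sglist;
	int i, nsegs;

	/* Fill the (possibly chained) scatterlist from the request. */
	nsegs = blk_rq_map_sg(q, rq, sglist);

	for (i = 0; i < nsegs; i++, sg = sg_next(sg)) {
		/* One DMA segment per scatterlist entry (illustrative only). */
		setup_dma_descriptor(page_to_phys(sg->page) + sg->offset,
				     sg->length);
	}
}
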
@@ -2660,6 +2653,14 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
 
 EXPORT_SYMBOL(blk_execute_rq);
 
+static void bio_end_empty_barrier(struct bio *bio, int err)
+{
+	if (err)
+		clear_bit(BIO_UPTODATE, &bio->bi_flags);
+
+	complete(bio->bi_private);
+}
+
 /**
  * blkdev_issue_flush - queue a flush
  * @bdev:	blockdev to issue flush for
@@ -2672,7 +2673,10 @@ EXPORT_SYMBOL(blk_execute_rq);
  */
 int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 {
+	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q;
+	struct bio *bio;
+	int ret;
 
 	if (bdev->bd_disk == NULL)
 		return -ENXIO;
@@ -2680,10 +2684,32 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 	q = bdev_get_queue(bdev);
 	if (!q)
 		return -ENXIO;
-	if (!q->issue_flush_fn)
-		return -EOPNOTSUPP;
 
-	return q->issue_flush_fn(q, bdev->bd_disk, error_sector);
+	bio = bio_alloc(GFP_KERNEL, 0);
+	if (!bio)
+		return -ENOMEM;
+
+	bio->bi_end_io = bio_end_empty_barrier;
+	bio->bi_private = &wait;
+	bio->bi_bdev = bdev;
+	submit_bio(1 << BIO_RW_BARRIER, bio);
+
+	wait_for_completion(&wait);
+
+	/*
+	 * The driver must store the error location in ->bi_sector, if
+	 * it supports it. For non-stacked drivers, this should be copied
+	 * from rq->sector.
+	 */
+	if (error_sector)
+		*error_sector = bio->bi_sector;
+
+	ret = 0;
+	if (!bio_flagged(bio, BIO_UPTODATE))
+		ret = -EIO;
+
+	bio_put(bio);
+	return ret;
 }
 
 EXPORT_SYMBOL(blkdev_issue_flush);
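
blkdev_issue_flush() no longer depends on a driver-provided issue_flush_fn; it now builds a zero-length barrier bio and waits for it, so any driver that handles barriers gets cache flushing for free. The calling convention is unchanged. A minimal, hypothetical caller sketch (example_sync_device() is not part of the patch):

static int example_sync_device(struct block_device *bdev)
{
	sector_t error_sector;

	/*
	 * Ask the block layer to flush the device's volatile write cache.
	 * On failure, error_sector may hold the sector reported by the
	 * driver, for drivers that fill in ->bi_sector as described above.
	 */
	return blkdev_issue_flush(bdev, &error_sector);
}
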
@@ -3051,7 +3077,7 @@ static inline void blk_partition_remap(struct bio *bio)
 {
 	struct block_device *bdev = bio->bi_bdev;
 
-	if (bdev != bdev->bd_contains) {
+	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
 		struct hd_struct *p = bdev->bd_part;
 		const int rw = bio_data_dir(bio);
 
@@ -3117,6 +3143,35 @@ static inline int should_fail_request(struct bio *bio)
 
 #endif /* CONFIG_FAIL_MAKE_REQUEST */
 
+/*
+ * Check whether this bio extends beyond the end of the device.
+ */
+static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
+{
+	sector_t maxsector;
+
+	if (!nr_sectors)
+		return 0;
+
+	/* Test device or partition size, when known. */
+	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+	if (maxsector) {
+		sector_t sector = bio->bi_sector;
+
+		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
+			/*
+			 * This may well happen - the kernel calls bread()
+			 * without checking the size of the device, e.g., when
+			 * mounting a device.
+			 */
+			handle_bad_sector(bio);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
 /**
  * generic_make_request: hand a buffer to its device driver for I/O
  * @bio:  The bio describing the location in memory and on the device.
@@ -3144,27 +3199,14 @@ static inline int should_fail_request(struct bio *bio)
 static inline void __generic_make_request(struct bio *bio)
 {
 	struct request_queue *q;
-	sector_t maxsector;
 	sector_t old_sector;
 	int ret, nr_sectors = bio_sectors(bio);
 	dev_t old_dev;
 
 	might_sleep();
-	/* Test device or partition size, when known. */
-	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
-	if (maxsector) {
-		sector_t sector = bio->bi_sector;
 
-		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
-			/*
-			 * This may well happen - the kernel calls bread()
-			 * without checking the size of the device, e.g., when
-			 * mounting a device.
-			 */
-			handle_bad_sector(bio);
-			goto end_io;
-		}
-	}
+	if (bio_check_eod(bio, nr_sectors))
+		goto end_io;
 
 	/*
 	 * Resolve the mapping until finished. (drivers are
@@ -3191,7 +3233,7 @@ end_io:
 			break;
 		}
 
-		if (unlikely(bio_sectors(bio) > q->max_hw_sectors)) {
+		if (unlikely(nr_sectors > q->max_hw_sectors)) {
 			printk("bio too big device %s (%u > %u)\n",
 				bdevname(bio->bi_bdev, b),
 				bio_sectors(bio),
@@ -3212,7 +3254,7 @@ end_io:
 		blk_partition_remap(bio);
 
 		if (old_sector != -1)
-			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector, 
+			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
 					    old_sector);
 
 		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
@@ -3220,21 +3262,8 @@ end_io:
 		old_sector = bio->bi_sector;
 		old_dev = bio->bi_bdev->bd_dev;
 
-		maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
-		if (maxsector) {
-			sector_t sector = bio->bi_sector;
-
-			if (maxsector < nr_sectors ||
-					maxsector - nr_sectors < sector) {
-				/*
-				 * This may well happen - partitions are not
-				 * checked to make sure they are within the size
-				 * of the whole device.
-				 */
-				handle_bad_sector(bio);
-				goto end_io;
-			}
-		}
+		if (bio_check_eod(bio, nr_sectors))
+			goto end_io;
 
 		ret = q->make_request_fn(q, bio);
 	} while (ret);
@@ -3307,23 +3336,32 @@ void submit_bio(int rw, struct bio *bio)
 {
 	int count = bio_sectors(bio);
 
-	BIO_BUG_ON(!bio->bi_size);
-	BIO_BUG_ON(!bio->bi_io_vec);
 	bio->bi_rw |= rw;
-	if (rw & WRITE) {
-		count_vm_events(PGPGOUT, count);
-	} else {
-		task_io_account_read(bio->bi_size);
-		count_vm_events(PGPGIN, count);
-	}
 
-	if (unlikely(block_dump)) {
-		char b[BDEVNAME_SIZE];
-		printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
-			current->comm, current->pid,
-			(rw & WRITE) ? "WRITE" : "READ",
-			(unsigned long long)bio->bi_sector,
-			bdevname(bio->bi_bdev,b));
+	/*
+	 * If it's a regular read/write or a barrier with data attached,
+	 * go through the normal accounting stuff before submission.
+	 */
+	if (!bio_empty_barrier(bio)) {
+
+		BIO_BUG_ON(!bio->bi_size);
+		BIO_BUG_ON(!bio->bi_io_vec);
+
+		if (rw & WRITE) {
+			count_vm_events(PGPGOUT, count);
+		} else {
+			task_io_account_read(bio->bi_size);
+			count_vm_events(PGPGIN, count);
+		}
+
+		if (unlikely(block_dump)) {
+			char b[BDEVNAME_SIZE];
+			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
+				current->comm, current->pid,
+				(rw & WRITE) ? "WRITE" : "READ",
+				(unsigned long long)bio->bi_sector,
+				bdevname(bio->bi_bdev,b));
+		}
 	}
 
 	generic_make_request(bio);
@@ -3399,6 +3437,14 @@ static int __end_that_request_first(struct request *req, int uptodate,
 	while ((bio = req->bio) != NULL) {
 		int nbytes;
 
+		/*
+		 * For an empty barrier request, the low level driver must
+		 * store a potential error location in ->sector. We pass
+		 * that back up in ->bi_sector.
+		 */
+		if (blk_empty_barrier(req))
+			bio->bi_sector = req->sector;
+
 		if (nr_bytes >= bio->bi_size) {
 			req->bio = bio->bi_next;
 			nbytes = bio->bi_size;
@@ -3564,7 +3610,7 @@ static struct notifier_block blk_cpu_notifier __cpuinitdata = {
  * Description:
  *     Ends all I/O on a request. It does not handle partial completions,
  *     unless the driver actually implements this in its completion callback
- *     through requeueing. Theh actual completion happens out-of-order,
+ *     through requeueing. The actual completion happens out-of-order,
  *     through a softirq handler. The user must have registered a completion
  *     callback through blk_queue_softirq_done().
  **/
@@ -3627,15 +3673,83 @@ void end_that_request_last(struct request *req, int uptodate)
 
 EXPORT_SYMBOL(end_that_request_last);
 
-void end_request(struct request *req, int uptodate)
+static inline void __end_request(struct request *rq, int uptodate,
+				 unsigned int nr_bytes, int dequeue)
 {
-	if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
-		add_disk_randomness(req->rq_disk);
-		blkdev_dequeue_request(req);
-		end_that_request_last(req, uptodate);
+	if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
+		if (dequeue)
+			blkdev_dequeue_request(rq);
+		add_disk_randomness(rq->rq_disk);
+		end_that_request_last(rq, uptodate);
 	}
 }
 
+static unsigned int rq_byte_size(struct request *rq)
+{
+	if (blk_fs_request(rq))
+		return rq->hard_nr_sectors << 9;
+
+	return rq->data_len;
+}
+
+/**
+ * end_queued_request - end all I/O on a queued request
+ * @rq:		the request being processed
+ * @uptodate:	error value or 0/1 uptodate flag
+ *
+ * Description:
+ *     Ends all I/O on a request, and removes it from the block layer queues.
+ *     Not suitable for normal IO completion, unless the driver still has
+ *     the request attached to the block layer.
+ *
+ **/
+void end_queued_request(struct request *rq, int uptodate)
+{
+	__end_request(rq, uptodate, rq_byte_size(rq), 1);
+}
+EXPORT_SYMBOL(end_queued_request);
+
+/**
+ * end_dequeued_request - end all I/O on a dequeued request
+ * @rq:		the request being processed
+ * @uptodate:	error value or 0/1 uptodate flag
+ *
+ * Description:
+ *     Ends all I/O on a request. The request must already have been
+ *     dequeued using blkdev_dequeue_request(), as is normally the case
+ *     for most drivers.
+ *
+ **/
+void end_dequeued_request(struct request *rq, int uptodate)
+{
+	__end_request(rq, uptodate, rq_byte_size(rq), 0);
+}
+EXPORT_SYMBOL(end_dequeued_request);
+
+
+/**
+ * end_request - end I/O on the current segment of the request
+ * @rq:		the request being processed
+ * @uptodate:	error value or 0/1 uptodate flag
+ *
+ * Description:
+ *     Ends I/O on the current segment of a request. If that is the only
+ *     remaining segment, the request is also completed and freed.
+ *
+ *     This is a remnant of how older block drivers handled IO completions.
+ *     Modern drivers typically end IO on the full request in one go, unless
+ *     they have a residual value to account for. For that case this function
+ *     isn't really useful, unless the residual just happens to be the
+ *     full current segment. In other words, don't use this function in new
+ *     code. Either use end_request_completely(), or the
+ *     end_that_request_chunk() (along with end_that_request_last()) for
+ *     partial completions.
+ *
+ **/
+void end_request(struct request *req, int uptodate)
+{
+	__end_request(req, uptodate, req->hard_cur_sectors << 9, 1);
+}
 EXPORT_SYMBOL(end_request);
 
 static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
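
The new helpers split request completion by whether the request is still on the queue. A rough sketch of how a simple driver might use them, assuming 1 (or a negative errno for a specific failure) is passed as the uptodate value, as the kernel-doc above describes; both example functions are hypothetical:

/* Normal completion path: the request was already taken off the queue
 * with blkdev_dequeue_request() before being issued to the hardware. */
static void example_complete(struct request *rq, int error)
{
	end_dequeued_request(rq, error ? error : 1);
}

/* Error path from the request_fn: the request is still queued, so let
 * the helper dequeue it while ending it. */
static void example_fail_queued(struct request *rq)
{
	end_queued_request(rq, 0);	/* 0 == not uptodate, i.e. -EIO */
}
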
@@ -3949,7 +4063,23 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
 	return queue_var_show(max_hw_sectors_kb, (page));
 }
 
+static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(q->max_phys_segments, page);
+}
+
+static ssize_t queue_max_segments_store(struct request_queue *q,
+					const char *page, size_t count)
+{
+	unsigned long segments;
+	ssize_t ret = queue_var_store(&segments, page, count);
+
+	spin_lock_irq(q->queue_lock);
+	q->max_phys_segments = segments;
+	spin_unlock_irq(q->queue_lock);
 
+	return ret;
+}
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_requests_show,
@@ -3973,6 +4103,12 @@ static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
 	.show = queue_max_hw_sectors_show,
 };
 
+static struct queue_sysfs_entry queue_max_segments_entry = {
+	.attr = {.name = "max_segments", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_max_segments_show,
+	.store = queue_max_segments_store,
+};
+
 static struct queue_sysfs_entry queue_iosched_entry = {
 	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
 	.show = elv_iosched_show,
@@ -3984,6 +4120,7 @@ static struct attribute *default_attrs[] = {
 	&queue_ra_entry.attr,
 	&queue_max_hw_sectors_entry.attr,
 	&queue_max_sectors_entry.attr,
+	&queue_max_segments_entry.attr,
 	&queue_iosched_entry.attr,
 	NULL,
 };
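
The queue_max_segments_entry added above makes the segment limit tunable from userspace; queue attributes appear under /sys/block/<dev>/queue/, so this one shows up as max_segments. A small, purely illustrative userspace reader ("sda" is a placeholder device name):

#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/block/sda/queue/max_segments", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("max_segments: %s", buf);
	fclose(f);
	return 0;
}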