author     Mike Christie <michaelc@cs.wisc.edu>      2005-12-05 03:37:06 -0500
committer  James Bottomley <jejb@mulgrave.(none)>    2005-12-15 18:11:40 -0500
commit     defd94b75409b983f94548ea2f52ff5787ddb848
tree       0138b2dae748de88edaee4da23431f1a9dd347a1
parent     8b05b773b6030de5b1bab1cbb0bf1ff8c34cdbe0
[SCSI] separate max_sectors from max_hw_sectors
- export __blk_put_request and blk_execute_rq_nowait, needed for async REQ_BLOCK_PC requests.

- separate max_hw_sectors and max_sectors for the block/scsi_ioctl.c and SG_IO bio.c helpers, per Jens's last comments. Since block/scsi_ioctl.c SG_IO was already testing against max_sectors, and SCSI-ml was setting max_sectors and max_hw_sectors to the same value, this does not change any SCSI SG_IO behavior. It only prepares ll_rw_blk.c, scsi_ioctl.c and bio.c for when SCSI-ml begins to set a valid max_hw_sectors for all LLDs. Today, if an LLD does not set it, SCSI-ml sets it to a safe default, and some LLDs set it to an artificially low value to work around memory and feedback issues.

Note: since we now cap max_sectors to BLK_DEF_MAX_SECTORS, which is 1024, drivers that used to call blk_queue_max_sectors with a larger value will now see their fs requests capped to BLK_DEF_MAX_SECTORS.

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
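To make the capping note above concrete, here is a minimal, self-contained userspace sketch of the new blk_queue_max_sectors() behaviour. It is not kernel code: the struct and function names (queue_limits_model, model_queue_max_sectors) are invented for illustration; only the two #define values and the if/else logic come from this patch.

#include <stdio.h>

#define SAFE_MAX_SECTORS    255     /* default used when an LLD sets nothing */
#define BLK_DEF_MAX_SECTORS 1024    /* new cap applied to fs requests */

/* Hypothetical stand-in for the two request_queue limits touched here. */
struct queue_limits_model {
        unsigned short max_sectors;     /* limit for fs requests */
        unsigned short max_hw_sectors;  /* limit for REQ_BLOCK_PC / SG_IO */
};

/* Mirrors the new logic in blk_queue_max_sectors(): the driver's value is
 * always kept in max_hw_sectors, while max_sectors is capped at
 * BLK_DEF_MAX_SECTORS. */
static void model_queue_max_sectors(struct queue_limits_model *q,
                                    unsigned short max_sectors)
{
        if (BLK_DEF_MAX_SECTORS > max_sectors)
                q->max_hw_sectors = q->max_sectors = max_sectors;
        else {
                q->max_sectors = BLK_DEF_MAX_SECTORS;
                q->max_hw_sectors = max_sectors;
        }
}

int main(void)
{
        struct queue_limits_model q;

        /* A driver that used to pass a large value now sees fs requests
         * capped at 1024 sectors, while SG_IO can still use all 8192. */
        model_queue_max_sectors(&q, 8192);
        printf("max_sectors=%u max_hw_sectors=%u\n",
               q.max_sectors, q.max_hw_sectors);
        return 0;
}
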
-rw-r--r--   block/ll_rw_blk.c         34
-rw-r--r--   block/scsi_ioctl.c         2
-rw-r--r--   drivers/md/dm-table.c      2
-rw-r--r--   drivers/scsi/scsi_lib.c    2
-rw-r--r--   fs/bio.c                  20
-rw-r--r--   include/linux/blkdev.h     3
6 files changed, 42 insertions(+), 21 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index c525b5a2b598..d4beb9a89ee0 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -239,7 +239,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
        q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
        q->backing_dev_info.state = 0;
        q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
-       blk_queue_max_sectors(q, MAX_SECTORS);
+       blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
        blk_queue_hardsect_size(q, 512);
        blk_queue_dma_alignment(q, 511);
        blk_queue_congestion_threshold(q);
@@ -555,7 +555,12 @@ void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
                printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
        }

-       q->max_sectors = q->max_hw_sectors = max_sectors;
+       if (BLK_DEF_MAX_SECTORS > max_sectors)
+               q->max_hw_sectors = q->max_sectors = max_sectors;
+       else {
+               q->max_sectors = BLK_DEF_MAX_SECTORS;
+               q->max_hw_sectors = max_sectors;
+       }
 }

 EXPORT_SYMBOL(blk_queue_max_sectors);
@@ -657,8 +662,8 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
 void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
 {
        /* zero is "infinity" */
-       t->max_sectors = t->max_hw_sectors =
-               min_not_zero(t->max_sectors,b->max_sectors);
+       t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
+       t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);

        t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
        t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
@@ -1293,9 +1298,15 @@ static inline int ll_new_hw_segment(request_queue_t *q,
 static int ll_back_merge_fn(request_queue_t *q, struct request *req,
                             struct bio *bio)
 {
+       unsigned short max_sectors;
        int len;

-       if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+       if (unlikely(blk_pc_request(req)))
+               max_sectors = q->max_hw_sectors;
+       else
+               max_sectors = q->max_sectors;
+
+       if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
                req->flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
@@ -1325,9 +1336,16 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 static int ll_front_merge_fn(request_queue_t *q, struct request *req,
                              struct bio *bio)
 {
+       unsigned short max_sectors;
        int len;

-       if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+       if (unlikely(blk_pc_request(req)))
+               max_sectors = q->max_hw_sectors;
+       else
+               max_sectors = q->max_sectors;
+
+
+       if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
                req->flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
@@ -2144,7 +2162,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
        struct bio *bio;
        int reading;

-       if (len > (q->max_sectors << 9))
+       if (len > (q->max_hw_sectors << 9))
                return -EINVAL;
        if (!len || !ubuf)
                return -EINVAL;
@@ -2259,7 +2277,7 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
 {
        struct bio *bio;

-       if (len > (q->max_sectors << 9))
+       if (len > (q->max_hw_sectors << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 382dea7b224c..4e390dfd3157 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -233,7 +233,7 @@ static int sg_io(struct file *file, request_queue_t *q,
        if (verify_command(file, cmd))
                return -EPERM;

-       if (hdr->dxfer_len > (q->max_sectors << 9))
+       if (hdr->dxfer_len > (q->max_hw_sectors << 9))
                return -EIO;

        if (hdr->dxfer_len)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index a6d3baa46f61..a6f2dc66c3db 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -638,7 +638,7 @@ int dm_split_args(int *argc, char ***argvp, char *input)
 static void check_for_valid_limits(struct io_restrictions *rs)
 {
        if (!rs->max_sectors)
-               rs->max_sectors = MAX_SECTORS;
+               rs->max_sectors = SAFE_MAX_SECTORS;
        if (!rs->max_phys_segments)
                rs->max_phys_segments = MAX_PHYS_SEGMENTS;
        if (!rs->max_hw_segments)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 54a72f197487..14ad2a785a34 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -462,6 +462,7 @@ int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
        req = blk_get_request(sdev->request_queue, write, gfp);
        if (!req)
                goto free_sense;
+       req->flags |= REQ_BLOCK_PC | REQ_QUIET;

        if (use_sg)
                err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
@@ -477,7 +478,6 @@ int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
        req->sense_len = 0;
        req->timeout = timeout;
        req->retries = retries;
-       req->flags |= REQ_BLOCK_PC | REQ_QUIET;
        req->end_io_data = sioc;

        sioc->data = privdata;
diff --git a/fs/bio.c b/fs/bio.c
index 4d21ee3873ec..38d3e8023a07 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -313,7 +313,8 @@ int bio_get_nr_vecs(struct block_device *bdev)
 }

 static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
-                         *page, unsigned int len, unsigned int offset)
+                         *page, unsigned int len, unsigned int offset,
+                         unsigned short max_sectors)
 {
        int retried_segments = 0;
        struct bio_vec *bvec;
@@ -327,7 +328,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
        if (bio->bi_vcnt >= bio->bi_max_vecs)
                return 0;

-       if (((bio->bi_size + len) >> 9) > q->max_sectors)
+       if (((bio->bi_size + len) >> 9) > max_sectors)
                return 0;

        /*
@@ -401,7 +402,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
 int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
                     unsigned int len, unsigned int offset)
 {
-       return __bio_add_page(q, bio, page, len, offset);
+       return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
 }

 /**
@@ -420,8 +421,8 @@ int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
 int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
                  unsigned int offset)
 {
-       return __bio_add_page(bdev_get_queue(bio->bi_bdev), bio, page,
-                             len, offset);
+       struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+       return __bio_add_page(q, bio, page, len, offset, q->max_sectors);
 }

 struct bio_map_data {
@@ -533,7 +534,7 @@ struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
                        break;
                }

-               if (__bio_add_page(q, bio, page, bytes, 0) < bytes) {
+               if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
                        ret = -EINVAL;
                        break;
                }
@@ -647,7 +648,8 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
                /*
                 * sorry...
                 */
-               if (__bio_add_page(q, bio, pages[j], bytes, offset) < bytes)
+               if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
+                   bytes)
                        break;

                len -= bytes;
@@ -820,8 +822,8 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
                if (bytes > len)
                        bytes = len;

-               if (__bio_add_page(q, bio, virt_to_page(data), bytes,
-                                  offset) < bytes)
+               if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
+                                   offset) < bytes)
                        break;

                data += bytes;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 509e9a03a328..a18500d196e1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -702,7 +702,8 @@ extern int blkdev_issue_flush(struct block_device *, sector_t *);
 
 #define MAX_PHYS_SEGMENTS       128
 #define MAX_HW_SEGMENTS         128
-#define MAX_SECTORS             255
+#define SAFE_MAX_SECTORS        255
+#define BLK_DEF_MAX_SECTORS     1024

 #define MAX_SEGMENT_SIZE        65536
