author	Martin K. Petersen <martin.petersen@oracle.com>	2010-03-10 00:48:32 -0500
committer	Jens Axboe <jens.axboe@oracle.com>	2010-03-15 07:47:59 -0400
commit	ee714f2dd33e726346e34f5cda12543162f4753e (patch)
tree	63252755cc38a81085191e6b851a6a04623cc9b9
parent	2cda2728aa1c8c006418a24f867b25e5eb7a32e2 (diff)
block: Finalize conversion of block limits functions
Remove compatibility wrappers and update remaining drivers.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
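A minimal sketch, for illustration only and not part of the commit, of how a driver sets its queue limits once the wrappers are gone, using only the blk_queue_* setters this patch keeps; my_driver_set_limits, MY_SG_LIMIT and MY_MAX_SECTORS are hypothetical placeholders:

#include <linux/blkdev.h>

#define MY_SG_LIMIT	128	/* hypothetical scatter/gather segment limit */
#define MY_MAX_SECTORS	255	/* hypothetical per-command sector limit */

/* Sets the limits that the removed blk_queue_max_sectors(),
 * blk_queue_max_phys_segments() and blk_queue_max_hw_segments()
 * wrappers used to forward to. */
static void my_driver_set_limits(struct request_queue *q)
{
	/* One segment limit now covers the old phys/hw segment pair. */
	blk_queue_max_segments(q, MY_SG_LIMIT);

	/* Set the hardware sector limit directly. */
	blk_queue_max_hw_sectors(q, MY_MAX_SECTORS);
}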
-rw-r--r--	drivers/block/DAC960.c	1
-rw-r--r--	drivers/block/virtio_blk.c	5
-rw-r--r--	include/linux/blkdev.h	24
3 files changed, 2 insertions, 28 deletions
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 459f1bc25a7b..c5f22bb0a48e 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -2533,7 +2533,6 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
 	Controller->RequestQueue[n] = RequestQueue;
 	blk_queue_bounce_limit(RequestQueue, Controller->BounceBufferLimit);
 	RequestQueue->queuedata = Controller;
-	blk_queue_max_hw_segments(RequestQueue, Controller->DriverScatterGatherLimit);
 	blk_queue_max_segments(RequestQueue, Controller->DriverScatterGatherLimit);
 	blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
 	disk->queue = RequestQueue;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 3c64af05fa82..653817ceeedd 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -347,14 +347,13 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 	set_capacity(vblk->disk, cap);

 	/* We can handle whatever the host told us to handle. */
-	blk_queue_max_phys_segments(q, vblk->sg_elems-2);
-	blk_queue_max_hw_segments(q, vblk->sg_elems-2);
+	blk_queue_max_segments(q, vblk->sg_elems-2);

 	/* No need to bounce any requests */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);

 	/* No real sector limit. */
-	blk_queue_max_sectors(q, -1U);
+	blk_queue_max_hw_sectors(q, -1U);

 	/* Host can optionally specify maximum segment size and number of
 	 * segments. */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ebd22dbed861..41551c9341b6 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -921,26 +921,7 @@ extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
-
-/* Temporary compatibility wrapper */
-static inline void blk_queue_max_sectors(struct request_queue *q, unsigned int max)
-{
-	blk_queue_max_hw_sectors(q, max);
-}
-
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
-
-static inline void blk_queue_max_phys_segments(struct request_queue *q, unsigned short max)
-{
-	blk_queue_max_segments(q, max);
-}
-
-static inline void blk_queue_max_hw_segments(struct request_queue *q, unsigned short max)
-{
-	blk_queue_max_segments(q, max);
-}
-
-
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
 					  unsigned int max_discard_sectors);
@@ -1030,11 +1011,6 @@ static inline int sb_issue_discard(struct super_block *sb,
 
 extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
 
-#define MAX_PHYS_SEGMENTS	128
-#define MAX_HW_SEGMENTS		128
-#define SAFE_MAX_SECTORS	255
-#define MAX_SEGMENT_SIZE	65536
-
 enum blk_default_limits {
 	BLK_MAX_SEGMENTS	= 128,
 	BLK_SAFE_MAX_SECTORS	= 255,