path: root/block/blk-settings.c
author	Martin K. Petersen <martin.petersen@oracle.com>	2009-05-22 17:17:49 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-05-22 17:22:54 -0400
commit	e1defc4ff0cf57aca6c5e3ff99fa503f5943c1f1 (patch)
tree	d60d15a082171c58ac811d547d51a9c3119f23e3 /block/blk-settings.c
parent	9bd7de51ee8537094656149eaf45338cadb7d7d4 (diff)
block: Do away with the notion of hardsect_size
Until now we have had a 1:1 mapping between storage device physical block size and the logical block size used when addressing the device. With SATA 4KB drives coming out that will no longer be the case. The sector size will be 4KB but the logical block size will remain 512 bytes. Hence we need to distinguish between the physical block size and the logical ditto.

This patch renames hardsect_size to logical_block_size.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
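As an illustration of the rename (a hypothetical driver snippet, not part of this patch; mydrv_init_queue() is made up for the example): a driver that previously advertised its addressing unit with blk_queue_hardsect_size() now calls the renamed helper, and the value it passes is still the logical block size used for addressing (512 bytes here), even when the media's physical sectors are 4KB.

/* Hypothetical example -- not part of this commit or the kernel tree. */
#include <linux/blkdev.h>

static void mydrv_init_queue(struct request_queue *q)
{
	/* was: blk_queue_hardsect_size(q, 512); */
	blk_queue_logical_block_size(q, 512);
}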
Diffstat (limited to 'block/blk-settings.c')
-rw-r--r--	block/blk-settings.c	21
1 files changed, 10 insertions, 11 deletions
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 57af728d94bb..15c3164537b8 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -134,7 +134,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	q->backing_dev_info.state = 0;
 	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
 	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
-	blk_queue_hardsect_size(q, 512);
+	blk_queue_logical_block_size(q, 512);
 	blk_queue_dma_alignment(q, 511);
 	blk_queue_congestion_threshold(q);
 	q->nr_batching = BLK_BATCH_REQ;
@@ -288,21 +288,20 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 EXPORT_SYMBOL(blk_queue_max_segment_size);
 
 /**
- * blk_queue_hardsect_size - set hardware sector size for the queue
+ * blk_queue_logical_block_size - set logical block size for the queue
  * @q:  the request queue for the device
- * @size:  the hardware sector size, in bytes
+ * @size:  the logical block size, in bytes
  *
  * Description:
- *   This should typically be set to the lowest possible sector size
- *   that the hardware can operate on (possible without reverting to
- *   even internal read-modify-write operations). Usually the default
- *   of 512 covers most hardware.
+ *   This should be set to the lowest possible block size that the
+ *   storage device can address.  The default of 512 covers most
+ *   hardware.
  **/
-void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
+void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
 {
-	q->hardsect_size = size;
+	q->logical_block_size = size;
 }
-EXPORT_SYMBOL(blk_queue_hardsect_size);
+EXPORT_SYMBOL(blk_queue_logical_block_size);
 
 /*
  * Returns the minimum that is _not_ zero, unless both are zero.
@@ -324,7 +323,7 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 	t->max_phys_segments = min_not_zero(t->max_phys_segments, b->max_phys_segments);
 	t->max_hw_segments = min_not_zero(t->max_hw_segments, b->max_hw_segments);
 	t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size);
-	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
+	t->logical_block_size = max(t->logical_block_size, b->logical_block_size);
 	if (!t->queue_lock)
 		WARN_ON_ONCE(1);
 	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {