 arch/powerpc/sysdev/axonram.c       |  2
 block/blk-integrity.c               |  2
 block/blk-settings.c                | 21
 block/blk-sysfs.c                   | 12
 block/compat_ioctl.c                |  2
 block/ioctl.c                       |  2
 drivers/block/cciss.c               |  6
 drivers/block/cpqarray.c            |  4
 drivers/block/hd.c                  |  2
 drivers/block/mg_disk.c             |  2
 drivers/block/pktcdvd.c             |  2
 drivers/block/ps3disk.c             |  2
 drivers/block/ub.c                  |  6
 drivers/block/virtio_blk.c          |  2
 drivers/block/xen-blkfront.c        |  2
 drivers/block/xsysace.c             |  2
 drivers/cdrom/gdrom.c               |  2
 drivers/cdrom/viocd.c               |  4
 drivers/char/raw.c                  |  2
 drivers/ide/ide-cd.c                | 12
 drivers/md/bitmap.c                 |  4
 drivers/md/dm-exception-store.c     |  2
 drivers/md/dm-log.c                 |  3
 drivers/md/dm-snap-persistent.c     |  2
 drivers/md/dm-table.c               | 12
 drivers/md/md.c                     |  2
 drivers/memstick/core/mspro_block.c |  2
 drivers/message/i2o/i2o_block.c     |  5
 drivers/mmc/card/block.c            |  2
 drivers/mtd/mtd_blkdevs.c           |  2
 drivers/s390/block/dasd.c           |  2
 drivers/s390/block/dcssblk.c        |  2
 drivers/s390/block/xpram.c          |  2
 drivers/s390/char/tape_block.c      |  2
 drivers/scsi/sd.c                   |  2
 drivers/scsi/sr.c                   |  2
 fs/bio.c                            |  3
 fs/block_dev.c                      |  6
 fs/buffer.c                         |  6
 fs/direct-io.c                      |  2
 fs/ext3/super.c                     |  4
 fs/ext4/super.c                     |  2
 fs/gfs2/ops_fstype.c                |  4
 fs/gfs2/rgrp.c                      |  2
 fs/nilfs2/the_nilfs.c               |  2
 fs/ntfs/super.c                     |  6
 fs/ocfs2/cluster/heartbeat.c        |  2
 fs/ocfs2/super.c                    |  2
 fs/partitions/ibm.c                 |  2
 fs/partitions/msdos.c               |  4
 fs/udf/super.c                      |  2
 fs/xfs/linux-2.6/xfs_buf.c          |  2
 include/linux/blkdev.h              | 14
 include/linux/device-mapper.h       |  2
 54 files changed, 108 insertions(+), 98 deletions(-)
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 9e105cbc5e5f..a4779912a5ca 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -250,7 +250,7 @@ axon_ram_probe(struct of_device *device, const struct of_device_id *device_id)
 
 	set_capacity(bank->disk, bank->size >> AXON_RAM_SECTOR_SHIFT);
 	blk_queue_make_request(bank->disk->queue, axon_ram_make_request);
-	blk_queue_hardsect_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE);
+	blk_queue_logical_block_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE);
 	add_disk(bank->disk);
 
 	bank->irq_id = irq_of_parse_and_map(device->node, 0);
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 91fa8e06b6a5..73e28d355688 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -340,7 +340,7 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
 		kobject_uevent(&bi->kobj, KOBJ_ADD);
 
 		bi->flags |= INTEGRITY_FLAG_READ | INTEGRITY_FLAG_WRITE;
-		bi->sector_size = disk->queue->hardsect_size;
+		bi->sector_size = queue_logical_block_size(disk->queue);
 		disk->integrity = bi;
 	} else
 		bi = disk->integrity;
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 57af728d94bb..15c3164537b8 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -134,7 +134,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	q->backing_dev_info.state = 0;
 	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
 	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
-	blk_queue_hardsect_size(q, 512);
+	blk_queue_logical_block_size(q, 512);
 	blk_queue_dma_alignment(q, 511);
 	blk_queue_congestion_threshold(q);
 	q->nr_batching = BLK_BATCH_REQ;
@@ -288,21 +288,20 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 EXPORT_SYMBOL(blk_queue_max_segment_size);
 
 /**
- * blk_queue_hardsect_size - set hardware sector size for the queue
+ * blk_queue_logical_block_size - set logical block size for the queue
  * @q: the request queue for the device
- * @size: the hardware sector size, in bytes
+ * @size: the logical block size, in bytes
  *
  * Description:
- *   This should typically be set to the lowest possible sector size
- *   that the hardware can operate on (possible without reverting to
- *   even internal read-modify-write operations).  Usually the default
- *   of 512 covers most hardware.
+ *   This should be set to the lowest possible block size that the
+ *   storage device can address.  The default of 512 covers most
+ *   hardware.
  **/
-void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
+void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
 {
-	q->hardsect_size = size;
+	q->logical_block_size = size;
 }
-EXPORT_SYMBOL(blk_queue_hardsect_size);
+EXPORT_SYMBOL(blk_queue_logical_block_size);
 
 /*
  * Returns the minimum that is _not_ zero, unless both are zero.
@@ -324,7 +323,7 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 	t->max_phys_segments = min_not_zero(t->max_phys_segments, b->max_phys_segments);
 	t->max_hw_segments = min_not_zero(t->max_hw_segments, b->max_hw_segments);
 	t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size);
-	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
+	t->logical_block_size = max(t->logical_block_size, b->logical_block_size);
 	if (!t->queue_lock)
 		WARN_ON_ONCE(1);
 	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
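
As a rough illustration of the renamed API documented above (not part of this patch; the mydrv_* names are hypothetical), a block driver advertises its smallest addressable unit during queue setup:

#include <linux/blkdev.h>

/* Sketch: advertise the device's logical block size on its request queue. */
static void mydrv_setup_queue(struct request_queue *q, unsigned short lbs)
{
	blk_queue_logical_block_size(q, lbs);	/* e.g. 512 or 4096 bytes */
	blk_queue_max_sectors(q, 255);		/* unrelated limit, shown for context */
}
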
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 3ff9bba3379a..13d38b7e4d0f 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -100,9 +100,9 @@ static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
 	return queue_var_show(max_sectors_kb, (page));
 }
 
-static ssize_t queue_hw_sector_size_show(struct request_queue *q, char *page)
+static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
 {
-	return queue_var_show(q->hardsect_size, page);
+	return queue_var_show(queue_logical_block_size(q), page);
 }
 
 static ssize_t
@@ -249,7 +249,12 @@ static struct queue_sysfs_entry queue_iosched_entry = {
 
 static struct queue_sysfs_entry queue_hw_sector_size_entry = {
 	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
-	.show = queue_hw_sector_size_show,
+	.show = queue_logical_block_size_show,
+};
+
+static struct queue_sysfs_entry queue_logical_block_size_entry = {
+	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
+	.show = queue_logical_block_size_show,
 };
 
 static struct queue_sysfs_entry queue_nonrot_entry = {
@@ -283,6 +288,7 @@ static struct attribute *default_attrs[] = {
 	&queue_max_sectors_entry.attr,
 	&queue_iosched_entry.attr,
 	&queue_hw_sector_size_entry.attr,
+	&queue_logical_block_size_entry.attr,
 	&queue_nonrot_entry.attr,
 	&queue_nomerges_entry.attr,
 	&queue_rq_affinity_entry.attr,
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index f87615dea46b..9eaa1940273a 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -763,7 +763,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 	case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
 		return compat_put_int(arg, block_size(bdev));
 	case BLKSSZGET: /* get block device hardware sector size */
-		return compat_put_int(arg, bdev_hardsect_size(bdev));
+		return compat_put_int(arg, bdev_logical_block_size(bdev));
 	case BLKSECTGET:
 		return compat_put_ushort(arg,
 					 bdev_get_queue(bdev)->max_sectors);
diff --git a/block/ioctl.c b/block/ioctl.c
index ad474d4bbcce..7aa97f65da82 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -311,7 +311,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 	case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */
 		return put_int(arg, block_size(bdev));
 	case BLKSSZGET: /* get block device hardware sector size */
-		return put_int(arg, bdev_hardsect_size(bdev));
+		return put_int(arg, bdev_logical_block_size(bdev));
 	case BLKSECTGET:
 		return put_ushort(arg, bdev_get_queue(bdev)->max_sectors);
 	case BLKRASET:
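
For context only (a userspace sketch, not part of this patch), BLKSSZGET is how applications read back the value a driver set with blk_queue_logical_block_size(); the device path below is just an example:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	int fd = open("/dev/sda", O_RDONLY);	/* example device node */
	int ssz = 0;

	if (fd < 0 || ioctl(fd, BLKSSZGET, &ssz) < 0)
		return 1;
	printf("logical block size: %d\n", ssz);	/* e.g. 512 or 4096 */
	close(fd);
	return 0;
}
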
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index e714e7cce6f2..94474f5f8bce 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1389,8 +1389,8 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
 
 	disk->queue->queuedata = h;
 
-	blk_queue_hardsect_size(disk->queue,
-			h->drv[drv_index].block_size);
+	blk_queue_logical_block_size(disk->queue,
+			h->drv[drv_index].block_size);
 
 	/* Make sure all queue data is written out before */
 	/* setting h->drv[drv_index].queue, as setting this */
@@ -2298,7 +2298,7 @@ static int cciss_revalidate(struct gendisk *disk)
 	cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
 			inq_buff, drv);
 
-	blk_queue_hardsect_size(drv->queue, drv->block_size);
+	blk_queue_logical_block_size(drv->queue, drv->block_size);
 	set_capacity(disk, drv->nr_blocks);
 
 	kfree(inq_buff);
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index a02dcfc00f13..44fa2018f6b0 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -474,7 +474,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
 		disk->fops = &ida_fops;
 		if (j && !drv->nr_blks)
 			continue;
-		blk_queue_hardsect_size(hba[i]->queue, drv->blk_size);
+		blk_queue_logical_block_size(hba[i]->queue, drv->blk_size);
 		set_capacity(disk, drv->nr_blks);
 		disk->queue = hba[i]->queue;
 		disk->private_data = drv;
@@ -1546,7 +1546,7 @@ static int revalidate_allvol(ctlr_info_t *host)
 		drv_info_t *drv = &host->drv[i];
 		if (i && !drv->nr_blks)
 			continue;
-		blk_queue_hardsect_size(host->queue, drv->blk_size);
+		blk_queue_logical_block_size(host->queue, drv->blk_size);
 		set_capacity(disk, drv->nr_blks);
 		disk->queue = host->queue;
 		disk->private_data = drv;
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index 961de56d00a9..f65b3f369eb0 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -724,7 +724,7 @@ static int __init hd_init(void)
 	blk_queue_max_sectors(hd_queue, 255);
 	init_timer(&device_timer);
 	device_timer.function = hd_times_out;
-	blk_queue_hardsect_size(hd_queue, 512);
+	blk_queue_logical_block_size(hd_queue, 512);
 
 	if (!NR_HD) {
 		/*
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index c0cd0a03f698..60de5a01e71e 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -996,7 +996,7 @@ static int mg_probe(struct platform_device *plat_dev)
 		goto probe_err_6;
 	}
 	blk_queue_max_sectors(host->breq, MG_MAX_SECTS);
-	blk_queue_hardsect_size(host->breq, MG_SECTOR_SIZE);
+	blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE);
 
 	init_timer(&host->timer);
 	host->timer.function = mg_times_out;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index dc7a8c352da2..293f5858921d 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2657,7 +2657,7 @@ static void pkt_init_queue(struct pktcdvd_device *pd)
 	struct request_queue *q = pd->disk->queue;
 
 	blk_queue_make_request(q, pkt_make_request);
-	blk_queue_hardsect_size(q, CD_FRAMESIZE);
+	blk_queue_logical_block_size(q, CD_FRAMESIZE);
 	blk_queue_max_sectors(q, PACKET_MAX_SECTORS);
 	blk_queue_merge_bvec(q, pkt_merge_bvec);
 	q->queuedata = pd;
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 338cee4cc0ba..aaeeb544228a 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -477,7 +477,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
 	blk_queue_max_sectors(queue, dev->bounce_size >> 9);
 	blk_queue_segment_boundary(queue, -1UL);
 	blk_queue_dma_alignment(queue, dev->blk_size-1);
-	blk_queue_hardsect_size(queue, dev->blk_size);
+	blk_queue_logical_block_size(queue, dev->blk_size);
 
 	blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
 			  ps3disk_prepare_flush);
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index e67bbae9547d..cc54473b8e77 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -722,7 +722,7 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
 	/*
 	 * build the command
 	 *
-	 * The call to blk_queue_hardsect_size() guarantees that request
+	 * The call to blk_queue_logical_block_size() guarantees that request
 	 * is aligned, but it is given in terms of 512 byte units, always.
 	 */
 	block = blk_rq_pos(rq) >> lun->capacity.bshift;
@@ -1749,7 +1749,7 @@ static int ub_bd_revalidate(struct gendisk *disk)
 	ub_revalidate(lun->udev, lun);
 
 	/* XXX Support sector size switching like in sr.c */
-	blk_queue_hardsect_size(disk->queue, lun->capacity.bsize);
+	blk_queue_logical_block_size(disk->queue, lun->capacity.bsize);
 	set_capacity(disk, lun->capacity.nsec);
 	// set_disk_ro(sdkp->disk, lun->readonly);
 
@@ -2324,7 +2324,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
 	blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
 	blk_queue_segment_boundary(q, 0xffffffff);	/* Dubious. */
 	blk_queue_max_sectors(q, UB_MAX_SECTORS);
-	blk_queue_hardsect_size(q, lun->capacity.bsize);
+	blk_queue_logical_block_size(q, lun->capacity.bsize);
 
 	lun->disk = disk;
 	q->queuedata = lun;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 511d4ae2d176..c4845b169464 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -347,7 +347,7 @@ static int virtblk_probe(struct virtio_device *vdev)
 			  offsetof(struct virtio_blk_config, blk_size),
 			  &blk_size);
 	if (!err)
-		blk_queue_hardsect_size(vblk->disk->queue, blk_size);
+		blk_queue_logical_block_size(vblk->disk->queue, blk_size);
 
 	add_disk(vblk->disk);
 	return 0;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 132120ae4bde..c1996829d5ec 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -344,7 +344,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
 	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
 
 	/* Hard sector size and max sectors impersonate the equiv. hardware. */
-	blk_queue_hardsect_size(rq, sector_size);
+	blk_queue_logical_block_size(rq, sector_size);
 	blk_queue_max_sectors(rq, 512);
 
 	/* Each segment in a request is up to an aligned page in size. */
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 3a4397edab71..f08491a3a813 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -984,7 +984,7 @@ static int __devinit ace_setup(struct ace_device *ace)
 	ace->queue = blk_init_queue(ace_request, &ace->lock);
 	if (ace->queue == NULL)
 		goto err_blk_initq;
-	blk_queue_hardsect_size(ace->queue, 512);
+	blk_queue_logical_block_size(ace->queue, 512);
 
 	/*
 	 * Allocate and initialize GD structure
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 1e366ad8f680..b5621f27c4be 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -739,7 +739,7 @@ static void __devinit probe_gdrom_setupdisk(void)
 
 static int __devinit probe_gdrom_setupqueue(void)
 {
-	blk_queue_hardsect_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
+	blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
 	/* using DMA so memory will need to be contiguous */
 	blk_queue_max_hw_segments(gd.gdrom_rq, 1);
 	/* set a large max size to get most from DMA */
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index f177c2d4017f..0fff646cc2f0 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -469,8 +469,8 @@ static void vio_handle_cd_event(struct HvLpEvent *event)
 	case viocdopen:
 		if (event->xRc == 0) {
 			di = &viocd_diskinfo[bevent->disk];
-			blk_queue_hardsect_size(di->viocd_disk->queue,
-					bevent->block_size);
+			blk_queue_logical_block_size(di->viocd_disk->queue,
+					bevent->block_size);
 			set_capacity(di->viocd_disk,
 					bevent->media_size *
 					bevent->block_size / 512);
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 20d90e6a6e50..db32f0e4c7dd 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -71,7 +71,7 @@ static int raw_open(struct inode *inode, struct file *filp)
 	err = bd_claim(bdev, raw_open);
 	if (err)
 		goto out1;
-	err = set_blocksize(bdev, bdev_hardsect_size(bdev));
+	err = set_blocksize(bdev, bdev_logical_block_size(bdev));
 	if (err)
 		goto out2;
 	filp->f_flags |= O_DIRECT;
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 1799328decfb..424140c6c400 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -182,7 +182,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
 			(sense->information[2] << 8) |
 			(sense->information[3]);
 
-		if (drive->queue->hardsect_size == 2048)
+		if (queue_logical_block_size(drive->queue) == 2048)
 			/* device sector size is 2K */
 			sector <<= 2;
 
@@ -737,7 +737,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
 	struct request_queue *q = drive->queue;
 	int write = rq_data_dir(rq) == WRITE;
 	unsigned short sectors_per_frame =
-		queue_hardsect_size(q) >> SECTOR_BITS;
+		queue_logical_block_size(q) >> SECTOR_BITS;
 
 	ide_debug_log(IDE_DBG_RQ, "rq->cmd[0]: 0x%x, rq->cmd_flags: 0x%x, "
 		      "secs_per_frame: %u",
@@ -1021,8 +1021,8 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
 	/* save a private copy of the TOC capacity for error handling */
 	drive->probed_capacity = toc->capacity * sectors_per_frame;
 
-	blk_queue_hardsect_size(drive->queue,
-			sectors_per_frame << SECTOR_BITS);
+	blk_queue_logical_block_size(drive->queue,
+			sectors_per_frame << SECTOR_BITS);
 
 	/* first read just the header, so we know how long the TOC is */
 	stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
@@ -1338,7 +1338,7 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
 /* standard prep_rq_fn that builds 10 byte cmds */
 static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
 {
-	int hard_sect = queue_hardsect_size(q);
+	int hard_sect = queue_logical_block_size(q);
 	long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
 	unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
 
@@ -1543,7 +1543,7 @@ static int ide_cdrom_setup(ide_drive_t *drive)
 
 	nslots = ide_cdrom_probe_capabilities(drive);
 
-	blk_queue_hardsect_size(q, CD_FRAMESIZE);
+	blk_queue_logical_block_size(q, CD_FRAMESIZE);
 
 	if (ide_cdrom_register(drive, nslots)) {
 		printk(KERN_ERR PFX "%s: %s failed to register device with the"
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 47c68bc75a17..06b0ded1ce23 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -232,7 +232,7 @@ static struct page *read_sb_page(mddev_t *mddev, long offset,
 			target = rdev->sb_start + offset + index * (PAGE_SIZE/512);
 
 			if (sync_page_io(rdev->bdev, target,
-				 roundup(size, bdev_hardsect_size(rdev->bdev)),
+				 roundup(size, bdev_logical_block_size(rdev->bdev)),
 				 page, READ)) {
 				page->index = index;
 				attach_page_buffers(page, NULL); /* so that free_buffer will
@@ -287,7 +287,7 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
 			int size = PAGE_SIZE;
 			if (page->index == bitmap->file_pages-1)
 				size = roundup(bitmap->last_page_size,
-					       bdev_hardsect_size(rdev->bdev));
+					       bdev_logical_block_size(rdev->bdev));
 			/* Just make sure we aren't corrupting data or
 			 * metadata
 			 */
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index a2e26c242141..75d8081a9041 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -178,7 +178,7 @@ static int set_chunk_size(struct dm_exception_store *store,
 	}
 
 	/* Validate the chunk size against the device block size */
-	if (chunk_size_ulong % (bdev_hardsect_size(store->cow->bdev) >> 9)) {
+	if (chunk_size_ulong % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
 		*error = "Chunk size is not a multiple of device blocksize";
 		return -EINVAL;
 	}
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index be233bc4d917..6fa8ccf91c70 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -413,7 +413,8 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
 	 * Buffer holds both header and bitset.
 	 */
 	buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
-			bitset_size, ti->limits.hardsect_size);
+			bitset_size,
+			ti->limits.logical_block_size);
 
 	if (buf_size > dev->bdev->bd_inode->i_size) {
 		DMWARN("log device %s too small: need %llu bytes",
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index e75c6dd76a9a..2662a41337e7 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -282,7 +282,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 	 */
 	if (!ps->store->chunk_size) {
 		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
-			    bdev_hardsect_size(ps->store->cow->bdev) >> 9);
+			    bdev_logical_block_size(ps->store->cow->bdev) >> 9);
 		ps->store->chunk_mask = ps->store->chunk_size - 1;
 		ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
 		chunk_size_supplied = 0;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 429b50b975d5..65e2d9759857 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -108,7 +108,8 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
 	lhs->max_hw_segments =
 		min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);
 
-	lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);
+	lhs->logical_block_size = max(lhs->logical_block_size,
+				      rhs->logical_block_size);
 
 	lhs->max_segment_size =
 		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);
@@ -529,7 +530,8 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 	rs->max_hw_segments =
 		min_not_zero(rs->max_hw_segments, q->max_hw_segments);
 
-	rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);
+	rs->logical_block_size = max(rs->logical_block_size,
+				     queue_logical_block_size(q));
 
 	rs->max_segment_size =
 		min_not_zero(rs->max_segment_size, q->max_segment_size);
@@ -683,8 +685,8 @@ static void check_for_valid_limits(struct io_restrictions *rs)
 		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
 	if (!rs->max_hw_segments)
 		rs->max_hw_segments = MAX_HW_SEGMENTS;
-	if (!rs->hardsect_size)
-		rs->hardsect_size = 1 << SECTOR_SHIFT;
+	if (!rs->logical_block_size)
+		rs->logical_block_size = 1 << SECTOR_SHIFT;
 	if (!rs->max_segment_size)
 		rs->max_segment_size = MAX_SEGMENT_SIZE;
 	if (!rs->seg_boundary_mask)
@@ -914,7 +916,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	blk_queue_max_sectors(q, t->limits.max_sectors);
 	q->max_phys_segments = t->limits.max_phys_segments;
 	q->max_hw_segments = t->limits.max_hw_segments;
-	q->hardsect_size = t->limits.hardsect_size;
+	q->logical_block_size = t->limits.logical_block_size;
 	q->max_segment_size = t->limits.max_segment_size;
 	q->max_hw_sectors = t->limits.max_hw_sectors;
 	q->seg_boundary_mask = t->limits.seg_boundary_mask;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index fccc8343a250..4cbc19f5c304 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1202,7 +1202,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
 	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
 
 	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
-	bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
+	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
 	if (rdev->sb_size & bmask)
 		rdev->sb_size = (rdev->sb_size | bmask) + 1;
 
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index c0bebc6a2f2c..7847bbc1440d 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1242,7 +1242,7 @@ static int mspro_block_init_disk(struct memstick_dev *card)
 
 	sprintf(msb->disk->disk_name, "mspblk%d", disk_id);
 
-	blk_queue_hardsect_size(msb->queue, msb->page_size);
+	blk_queue_logical_block_size(msb->queue, msb->page_size);
 
 	capacity = be16_to_cpu(sys_info->user_block_count);
 	capacity *= be16_to_cpu(sys_info->block_size);
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 6573ef4408f1..335d4c78a775 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -794,8 +794,9 @@ static int i2o_block_transfer(struct request *req)
 	if (c->adaptec) {
 		u8 cmd[10];
 		u32 scsi_flags;
-		u16 hwsec = queue_hardsect_size(req->q) >> KERNEL_SECTOR_SHIFT;
+		u16 hwsec;
 
+		hwsec = queue_logical_block_size(req->q) >> KERNEL_SECTOR_SHIFT;
 		memset(cmd, 0, 10);
 
 		sgl_offset = SGL_OFFSET_12;
@@ -1078,7 +1079,7 @@ static int i2o_block_probe(struct device *dev)
 	 */
 	if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
 	    !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
-		blk_queue_hardsect_size(queue, le32_to_cpu(blocksize));
+		blk_queue_logical_block_size(queue, le32_to_cpu(blocksize));
 	} else
 		osm_warn("unable to get blocksize of %s\n", gd->disk_name);
 
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c5df86546458..98ffc41eaf2c 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -521,7 +521,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
 
 	sprintf(md->disk->disk_name, "mmcblk%d", devidx);
 
-	blk_queue_hardsect_size(md->queue.queue, 512);
+	blk_queue_logical_block_size(md->queue.queue, 512);
 
 	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
 		/*
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 502622f628bc..aaac3b6800b7 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -378,7 +378,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
 	}
 
 	tr->blkcore_priv->rq->queuedata = tr;
-	blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
+	blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize);
 	if (tr->discard)
 		blk_queue_set_discard(tr->blkcore_priv->rq,
 				      blktrans_discard_request);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index e64f62d5e0fc..27a1be0cd4d4 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1990,7 +1990,7 @@ static void dasd_setup_queue(struct dasd_block *block)
 {
 	int max;
 
-	blk_queue_hardsect_size(block->request_queue, block->bp_block);
+	blk_queue_logical_block_size(block->request_queue, block->bp_block);
 	max = block->base->discipline->max_blocks << block->s2b_shift;
 	blk_queue_max_sectors(block->request_queue, max);
 	blk_queue_max_phys_segments(block->request_queue, -1L);
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index cfdcf1aed33c..a4c7ffcd9987 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -602,7 +602,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
 	dev_info->gd->private_data = dev_info;
 	dev_info->gd->driverfs_dev = &dev_info->dev;
 	blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
-	blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096);
+	blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
 
 	seg_byte_size = (dev_info->end - dev_info->start + 1);
 	set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 76814f3e898a..0ae0c83ef879 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -343,7 +343,7 @@ static int __init xpram_setup_blkdev(void)
 			goto out;
 		}
 		blk_queue_make_request(xpram_queues[i], xpram_make_request);
-		blk_queue_hardsect_size(xpram_queues[i], 4096);
+		blk_queue_logical_block_size(xpram_queues[i], 4096);
 	}
 
 	/*
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 1e7967675980..47ff695255ea 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -222,7 +222,7 @@ tapeblock_setup_device(struct tape_device * device)
 	if (rc)
 		goto cleanup_queue;
 
-	blk_queue_hardsect_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
+	blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
 	blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
 	blk_queue_max_phys_segments(blkdat->request_queue, -1L);
 	blk_queue_max_hw_segments(blkdat->request_queue, -1L);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 40d2860f235a..bcf3bd40bbd5 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1510,7 +1510,7 @@ got_data:
 		 */
 		sector_size = 512;
 	}
-	blk_queue_hardsect_size(sdp->request_queue, sector_size);
+	blk_queue_logical_block_size(sdp->request_queue, sector_size);
 
 	{
 		char cap_str_2[10], cap_str_10[10];
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index fddba53c7fe5..cd350dfc1216 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -727,7 +727,7 @@ static void get_sectorsize(struct scsi_cd *cd)
 	}
 
 	queue = cd->device->request_queue;
-	blk_queue_hardsect_size(queue, sector_size);
+	blk_queue_logical_block_size(queue, sector_size);
 
 	return;
 }
diff --git a/fs/bio.c b/fs/bio.c
index 81dc93e72535..4445c3821730 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1490,11 +1490,12 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors)
 sector_t bio_sector_offset(struct bio *bio, unsigned short index,
 			   unsigned int offset)
 {
-	unsigned int sector_sz = queue_hardsect_size(bio->bi_bdev->bd_disk->queue);
+	unsigned int sector_sz;
 	struct bio_vec *bv;
 	sector_t sectors;
 	int i;
 
+	sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue);
 	sectors = 0;
 
 	if (index >= bio->bi_idx)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index a85fe310fc6f..a29b4dcc1bca 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -76,7 +76,7 @@ int set_blocksize(struct block_device *bdev, int size)
 		return -EINVAL;
 
 	/* Size cannot be smaller than the size supported by the device */
-	if (size < bdev_hardsect_size(bdev))
+	if (size < bdev_logical_block_size(bdev))
 		return -EINVAL;
 
 	/* Don't change the size if it is same as current */
@@ -106,7 +106,7 @@ EXPORT_SYMBOL(sb_set_blocksize);
 
 int sb_min_blocksize(struct super_block *sb, int size)
 {
-	int minsize = bdev_hardsect_size(sb->s_bdev);
+	int minsize = bdev_logical_block_size(sb->s_bdev);
 	if (size < minsize)
 		size = minsize;
 	return sb_set_blocksize(sb, size);
@@ -1117,7 +1117,7 @@ EXPORT_SYMBOL(check_disk_change);
 
 void bd_set_size(struct block_device *bdev, loff_t size)
 {
-	unsigned bsize = bdev_hardsect_size(bdev);
+	unsigned bsize = bdev_logical_block_size(bdev);
 
 	bdev->bd_inode->i_size = size;
 	while (bsize < PAGE_CACHE_SIZE) {
diff --git a/fs/buffer.c b/fs/buffer.c
index aed297739eb0..36e2bbc60ec7 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1085,12 +1085,12 @@ static struct buffer_head *
 __getblk_slow(struct block_device *bdev, sector_t block, int size)
 {
 	/* Size must be multiple of hard sectorsize */
-	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
+	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
 			(size < 512 || size > PAGE_SIZE))) {
 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
 					size);
-		printk(KERN_ERR "hardsect size: %d\n",
-					bdev_hardsect_size(bdev));
+		printk(KERN_ERR "logical block size: %d\n",
+					bdev_logical_block_size(bdev));
 
 		dump_stack();
 		return NULL;
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 05763bbc2050..8b10b87dc01a 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1127,7 +1127,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 		rw = WRITE_ODIRECT;
 
 	if (bdev)
-		bdev_blkbits = blksize_bits(bdev_hardsect_size(bdev));
+		bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev));
 
 	if (offset & blocksize_mask) {
 		if (bdev)
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 599dbfe504c3..acbb94fdf903 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -1696,7 +1696,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
 		goto failed_mount;
 	}
 
-	hblock = bdev_hardsect_size(sb->s_bdev);
+	hblock = bdev_logical_block_size(sb->s_bdev);
 	if (sb->s_blocksize != blocksize) {
 		/*
 		 * Make sure the blocksize for the filesystem is larger
@@ -2119,7 +2119,7 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb,
 	}
 
 	blocksize = sb->s_blocksize;
-	hblock = bdev_hardsect_size(bdev);
+	hblock = bdev_logical_block_size(bdev);
 	if (blocksize < hblock) {
 		printk(KERN_ERR
 			"EXT3-fs: blocksize too small for journal device.\n");
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 2958f4e6f222..a30549f7a305 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2962,7 +2962,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
 	}
 
 	blocksize = sb->s_blocksize;
-	hblock = bdev_hardsect_size(bdev);
+	hblock = bdev_logical_block_size(bdev);
 	if (blocksize < hblock) {
 		printk(KERN_ERR
 			"EXT4-fs: blocksize too small for journal device.\n");
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 1ff9473ea753..a3b2ac989fc3 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -526,11 +526,11 @@ static int init_sb(struct gfs2_sbd *sdp, int silent)
 	}
 
 	/* Set up the buffer cache and SB for real */
-	if (sdp->sd_sb.sb_bsize < bdev_hardsect_size(sb->s_bdev)) {
+	if (sdp->sd_sb.sb_bsize < bdev_logical_block_size(sb->s_bdev)) {
 		ret = -EINVAL;
 		fs_err(sdp, "FS block size (%u) is too small for device "
 		       "block size (%u)\n",
-		       sdp->sd_sb.sb_bsize, bdev_hardsect_size(sb->s_bdev));
+		       sdp->sd_sb.sb_bsize, bdev_logical_block_size(sb->s_bdev));
 		goto out;
 	}
 	if (sdp->sd_sb.sb_bsize > PAGE_SIZE) {
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 565038243fa2..a971d24e10ce 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -845,7 +845,7 @@ static void gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
 	struct super_block *sb = sdp->sd_vfs;
 	struct block_device *bdev = sb->s_bdev;
 	const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize /
-					   bdev_hardsect_size(sb->s_bdev);
+					   bdev_logical_block_size(sb->s_bdev);
 	u64 blk;
 	sector_t start = 0;
 	sector_t nr_sects = 0;
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 7f65b3be4aa9..a91f15b8673c 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -515,7 +515,7 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
 
 	blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
 	if (sb->s_blocksize != blocksize) {
-		int hw_blocksize = bdev_hardsect_size(sb->s_bdev);
+		int hw_blocksize = bdev_logical_block_size(sb->s_bdev);
 
 		if (blocksize < hw_blocksize) {
 			printk(KERN_ERR
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index f76951dcd4a6..6aa7c4713536 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -25,7 +25,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/spinlock.h>
-#include <linux/blkdev.h>	/* For bdev_hardsect_size(). */
+#include <linux/blkdev.h>	/* For bdev_logical_block_size(). */
 #include <linux/backing-dev.h>
 #include <linux/buffer_head.h>
 #include <linux/vfs.h>
@@ -2785,13 +2785,13 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
 		goto err_out_now;
 
 	/* We support sector sizes up to the PAGE_CACHE_SIZE. */
-	if (bdev_hardsect_size(sb->s_bdev) > PAGE_CACHE_SIZE) {
+	if (bdev_logical_block_size(sb->s_bdev) > PAGE_CACHE_SIZE) {
 		if (!silent)
 			ntfs_error(sb, "Device has unsupported sector size "
 					"(%i). The maximum supported sector "
 					"size on this architecture is %lu "
 					"bytes.",
-					bdev_hardsect_size(sb->s_bdev),
+					bdev_logical_block_size(sb->s_bdev),
 					PAGE_CACHE_SIZE);
 		goto err_out_now;
 	}
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 4f85eceab376..09cc25d04611 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -1371,7 +1371,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
 
 	bdevname(reg->hr_bdev, reg->hr_dev_name);
 
-	sectsize = bdev_hardsect_size(reg->hr_bdev);
+	sectsize = bdev_logical_block_size(reg->hr_bdev);
 	if (sectsize != reg->hr_block_bytes) {
 		mlog(ML_ERROR,
 		     "blocksize %u incorrect for device, expected %d",
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 79ff8d9d37e0..5c6163f55039 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -713,7 +713,7 @@ static int ocfs2_sb_probe(struct super_block *sb,
 	*bh = NULL;
 
 	/* may be > 512 */
-	*sector_size = bdev_hardsect_size(sb->s_bdev);
+	*sector_size = bdev_logical_block_size(sb->s_bdev);
 	if (*sector_size > OCFS2_MAX_BLOCKSIZE) {
 		mlog(ML_ERROR, "Hardware sector size too large: %d (max=%d)\n",
 		     *sector_size, OCFS2_MAX_BLOCKSIZE);
diff --git a/fs/partitions/ibm.c b/fs/partitions/ibm.c
index 46297683cd34..fc71aab08460 100644
--- a/fs/partitions/ibm.c
+++ b/fs/partitions/ibm.c
@@ -76,7 +76,7 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
 	Sector sect;
 
 	res = 0;
-	blocksize = bdev_hardsect_size(bdev);
+	blocksize = bdev_logical_block_size(bdev);
 	if (blocksize <= 0)
 		goto out_exit;
 	i_size = i_size_read(bdev->bd_inode);
diff --git a/fs/partitions/msdos.c b/fs/partitions/msdos.c
index 796511886f28..0028d2ef0662 100644
--- a/fs/partitions/msdos.c
+++ b/fs/partitions/msdos.c
@@ -110,7 +110,7 @@ parse_extended(struct parsed_partitions *state, struct block_device *bdev,
 	Sector sect;
 	unsigned char *data;
 	u32 this_sector, this_size;
-	int sector_size = bdev_hardsect_size(bdev) / 512;
+	int sector_size = bdev_logical_block_size(bdev) / 512;
 	int loopct = 0;		/* number of links followed
 				   without finding a data partition */
 	int i;
@@ -415,7 +415,7 @@ static struct {
 
 int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
 {
-	int sector_size = bdev_hardsect_size(bdev) / 512;
+	int sector_size = bdev_logical_block_size(bdev) / 512;
 	Sector sect;
 	unsigned char *data;
 	struct partition *p;
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 72348cc855a4..0ba44107d8f1 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1915,7 +1915,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
 	if (uopt.flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) {
 		ret = udf_load_vrs(sb, &uopt, silent, &fileset);
 	} else {
-		uopt.blocksize = bdev_hardsect_size(sb->s_bdev);
+		uopt.blocksize = bdev_logical_block_size(sb->s_bdev);
 		ret = udf_load_vrs(sb, &uopt, silent, &fileset);
 		if (!ret && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) {
 			if (!silent)
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index e28800a9f2b5..1418b916fc27 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1501,7 +1501,7 @@ xfs_setsize_buftarg_early(
 	struct block_device	*bdev)
 {
 	return xfs_setsize_buftarg_flags(btp,
-			PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);
+			PAGE_CACHE_SIZE, bdev_logical_block_size(bdev), 0);
 }
 
 int
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 56ce53fce72e..872b78b7a101 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -391,7 +391,7 @@ struct request_queue
 	unsigned int		max_hw_sectors;
 	unsigned short		max_phys_segments;
 	unsigned short		max_hw_segments;
-	unsigned short		hardsect_size;
+	unsigned short		logical_block_size;
 	unsigned int		max_segment_size;
 
 	unsigned long		seg_boundary_mask;
@@ -901,7 +901,7 @@ extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
-extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
+extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
 extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
@@ -988,19 +988,19 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
 
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
-static inline int queue_hardsect_size(struct request_queue *q)
+static inline unsigned short queue_logical_block_size(struct request_queue *q)
 {
 	int retval = 512;
 
-	if (q && q->hardsect_size)
-		retval = q->hardsect_size;
+	if (q && q->logical_block_size)
+		retval = q->logical_block_size;
 
 	return retval;
 }
 
-static inline int bdev_hardsect_size(struct block_device *bdev)
+static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
 {
-	return queue_hardsect_size(bdev_get_queue(bdev));
+	return queue_logical_block_size(bdev_get_queue(bdev));
 }
 
 static inline int queue_dma_alignment(struct request_queue *q)
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index ded2d7c42668..49c2362977fd 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -149,7 +149,7 @@ struct io_restrictions {
 	unsigned max_hw_sectors;
 	unsigned max_sectors;
 	unsigned max_segment_size;
-	unsigned short hardsect_size;
+	unsigned short logical_block_size;
 	unsigned short max_hw_segments;
 	unsigned short max_phys_segments;
 	unsigned char no_cluster; /* inverted so that 0 is default */
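
As a closing sketch (not part of this patch), in-kernel callers read the advertised size through the renamed helpers in include/linux/blkdev.h above rather than touching q->hardsect_size directly; the mydrv_* name is hypothetical, mirroring the roundup() usage seen in drivers/md/bitmap.c:

#include <linux/kernel.h>
#include <linux/blkdev.h>

/* Sketch: round a byte count up to the device's logical block size. */
static inline unsigned int mydrv_round_to_lbs(struct block_device *bdev,
					      unsigned int bytes)
{
	unsigned short lbs = bdev_logical_block_size(bdev);

	return roundup(bytes, lbs);
}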