-rw-r--r--  Documentation/ABI/testing/sysfs-block |  59
-rw-r--r--  block/blk-settings.c                  | 186
-rw-r--r--  block/blk-sysfs.c                     |  33
-rw-r--r--  block/genhd.c                         |  11
-rw-r--r--  fs/partitions/check.c                 |  10
-rw-r--r--  include/linux/blkdev.h                |  47
-rw-r--r--  include/linux/genhd.h                 |   1
7 files changed, 347 insertions, 0 deletions
diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block
index 44f52a4f5903..cbbd3e069945 100644
--- a/Documentation/ABI/testing/sysfs-block
+++ b/Documentation/ABI/testing/sysfs-block
@@ -60,3 +60,62 @@ Description:
 		Indicates whether the block layer should automatically
 		generate checksums for write requests bound for
 		devices that support receiving integrity metadata.
+
+What:		/sys/block/<disk>/alignment_offset
+Date:		April 2009
+Contact:	Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+		Storage devices may report a physical block size that is
+		bigger than the logical block size (for instance a drive
+		with 4KB physical sectors exposing 512-byte logical
+		blocks to the operating system).  This parameter
+		indicates how many bytes the beginning of the device is
+		offset from the disk's natural alignment.
+
+What:		/sys/block/<disk>/<partition>/alignment_offset
+Date:		April 2009
+Contact:	Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+		Storage devices may report a physical block size that is
+		bigger than the logical block size (for instance a drive
+		with 4KB physical sectors exposing 512-byte logical
+		blocks to the operating system).  This parameter
+		indicates how many bytes the beginning of the partition
+		is offset from the disk's natural alignment.
+
+What:		/sys/block/<disk>/queue/logical_block_size
+Date:		May 2009
+Contact:	Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+		This is the smallest unit the storage device can
+		address.  It is typically 512 bytes.
+
+What:		/sys/block/<disk>/queue/physical_block_size
+Date:		May 2009
+Contact:	Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+		This is the smallest unit the storage device can write
+		without resorting to read-modify-write operation.  It is
+		usually the same as the logical block size but may be
+		bigger.  One example is SATA drives with 4KB sectors
+		that expose a 512-byte logical block size to the
+		operating system.
+
+What:		/sys/block/<disk>/queue/minimum_io_size
+Date:		April 2009
+Contact:	Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+		Storage devices may report a preferred minimum I/O size,
+		which is the smallest request the device can perform
+		without incurring a read-modify-write penalty.  For disk
+		drives this is often the physical block size.  For RAID
+		arrays it is often the stripe chunk size.
+
+What:		/sys/block/<disk>/queue/optimal_io_size
+Date:		April 2009
+Contact:	Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+		Storage devices may report an optimal I/O size, which is
+		the device's preferred unit of receiving I/O.  This is
+		rarely reported for disk drives.  For RAID devices it is
+		usually the stripe width or the internal block size.
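For context, a minimal userspace sketch that reads the attributes documented above. The disk name "sda", the helper function, and the fixed-size path buffer are illustrative assumptions and are not part of the patch.

/*
 * Illustrative only: read the new queue topology attributes from sysfs.
 * "sda" and read_queue_attr() are assumptions, not part of the patch.
 */
#include <stdio.h>

static unsigned long read_queue_attr(const char *disk, const char *attr)
{
	char path[256];
	unsigned long val = 0;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/%s/queue/%s", disk, attr);
	f = fopen(path, "r");
	if (!f)
		return 0;	/* attribute missing or unreadable */
	if (fscanf(f, "%lu", &val) != 1)
		val = 0;
	fclose(f);
	return val;
}

int main(void)
{
	printf("logical_block_size:  %lu\n", read_queue_attr("sda", "logical_block_size"));
	printf("physical_block_size: %lu\n", read_queue_attr("sda", "physical_block_size"));
	printf("minimum_io_size:     %lu\n", read_queue_attr("sda", "minimum_io_size"));
	printf("optimal_io_size:     %lu\n", read_queue_attr("sda", "optimal_io_size"));
	return 0;
}

Partitioning and mkfs tools can use these values to place partition and filesystem boundaries on minimum_io_size multiples and so avoid read-modify-write penalties.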
diff --git a/block/blk-settings.c b/block/blk-settings.c
index b0f547cecfb8..5649f34adb40 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -309,9 +309,94 @@ EXPORT_SYMBOL(blk_queue_max_segment_size);
 void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
 {
 	q->limits.logical_block_size = size;
+
+	if (q->limits.physical_block_size < size)
+		q->limits.physical_block_size = size;
+
+	if (q->limits.io_min < q->limits.physical_block_size)
+		q->limits.io_min = q->limits.physical_block_size;
 }
 EXPORT_SYMBOL(blk_queue_logical_block_size);
 
+/**
+ * blk_queue_physical_block_size - set physical block size for the queue
+ * @q: the request queue for the device
+ * @size: the physical block size, in bytes
+ *
+ * Description:
+ *   This should be set to the lowest possible sector size that the
+ *   hardware can operate on without reverting to read-modify-write
+ *   operations.
+ */
+void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
+{
+	q->limits.physical_block_size = size;
+
+	if (q->limits.physical_block_size < q->limits.logical_block_size)
+		q->limits.physical_block_size = q->limits.logical_block_size;
+
+	if (q->limits.io_min < q->limits.physical_block_size)
+		q->limits.io_min = q->limits.physical_block_size;
+}
+EXPORT_SYMBOL(blk_queue_physical_block_size);
+
+/**
+ * blk_queue_alignment_offset - set physical block alignment offset
+ * @q: the request queue for the device
+ * @offset: alignment offset in bytes
+ *
+ * Description:
+ *   Some devices are naturally misaligned to compensate for things like
+ *   the legacy DOS partition table 63-sector offset.  Low-level drivers
+ *   should call this function for devices whose first sector is not
+ *   naturally aligned.
+ */
+void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
+{
+	q->limits.alignment_offset =
+		offset & (q->limits.physical_block_size - 1);
+	q->limits.misaligned = 0;
+}
+EXPORT_SYMBOL(blk_queue_alignment_offset);
+
+/**
+ * blk_queue_io_min - set minimum request size for the queue
+ * @q: the request queue for the device
+ * @min: smallest I/O size in bytes
+ *
+ * Description:
+ *   Some devices have an internal block size bigger than the reported
+ *   hardware sector size.  This function can be used to signal the
+ *   smallest I/O the device can perform without incurring a performance
+ *   penalty.
+ */
+void blk_queue_io_min(struct request_queue *q, unsigned int min)
+{
+	q->limits.io_min = min;
+
+	if (q->limits.io_min < q->limits.logical_block_size)
+		q->limits.io_min = q->limits.logical_block_size;
+
+	if (q->limits.io_min < q->limits.physical_block_size)
+		q->limits.io_min = q->limits.physical_block_size;
+}
+EXPORT_SYMBOL(blk_queue_io_min);
+
+/**
+ * blk_queue_io_opt - set optimal request size for the queue
+ * @q: the request queue for the device
+ * @opt: optimal request size in bytes
+ *
+ * Description:
+ *   Drivers can call this function to set the preferred I/O request
+ *   size for devices that report such a value.
+ */
+void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
+{
+	q->limits.io_opt = opt;
+}
+EXPORT_SYMBOL(blk_queue_io_opt);
+
 /*
  * Returns the minimum that is _not_ zero, unless both are zero.
  */
@@ -358,6 +443,107 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
 /**
+ * blk_stack_limits - adjust queue_limits for stacked devices
+ * @t: the stacking driver limits (top)
+ * @b: the underlying queue limits (bottom)
+ * @offset: offset to beginning of data within component device
+ *
+ * Description:
+ *    Merges two queue_limit structs.  Returns 0 if alignment didn't
+ *    change.  Returns -1 if adding the bottom device caused
+ *    misalignment.
+ */
+int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+		     sector_t offset)
+{
+	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
+	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+
+	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
+					    b->seg_boundary_mask);
+
+	t->max_phys_segments = min_not_zero(t->max_phys_segments,
+					    b->max_phys_segments);
+
+	t->max_hw_segments = min_not_zero(t->max_hw_segments,
+					  b->max_hw_segments);
+
+	t->max_segment_size = min_not_zero(t->max_segment_size,
+					   b->max_segment_size);
+
+	t->logical_block_size = max(t->logical_block_size,
+				    b->logical_block_size);
+
+	t->physical_block_size = max(t->physical_block_size,
+				     b->physical_block_size);
+
+	t->io_min = max(t->io_min, b->io_min);
+	t->no_cluster |= b->no_cluster;
+
+	/* Bottom device offset aligned? */
+	if (offset &&
+	    (offset & (b->physical_block_size - 1)) != b->alignment_offset) {
+		t->misaligned = 1;
+		return -1;
+	}
+
+	/* If top has no alignment offset, inherit from bottom */
+	if (!t->alignment_offset)
+		t->alignment_offset =
+			b->alignment_offset & (b->physical_block_size - 1);
+
+	/* Top device aligned on logical block boundary? */
+	if (t->alignment_offset & (t->logical_block_size - 1)) {
+		t->misaligned = 1;
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ * disk_stack_limits - adjust queue limits for stacked drivers
+ * @disk: MD/DM gendisk (top)
+ * @bdev: the underlying block device (bottom)
+ * @offset: offset to beginning of data within component device
+ *
+ * Description:
+ *    Merges the limits for two queues.  Prints a warning and marks the
+ *    top device's limits as misaligned if adding the bottom device
+ *    causes misalignment.
+ */
+void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
+		       sector_t offset)
+{
+	struct request_queue *t = disk->queue;
+	struct request_queue *b = bdev_get_queue(bdev);
+
+	offset += get_start_sect(bdev) << 9;
+
+	if (blk_stack_limits(&t->limits, &b->limits, offset) < 0) {
+		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
+
+		disk_name(disk, 0, top);
+		bdevname(bdev, bottom);
+
+		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
+		       top, bottom);
+	}
+
+	if (!t->queue_lock)
+		WARN_ON_ONCE(1);
+	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(t->queue_lock, flags);
+		if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
+			queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
+		spin_unlock_irqrestore(t->queue_lock, flags);
+	}
+}
+EXPORT_SYMBOL(disk_stack_limits);
+
+/**
  * blk_queue_dma_pad - set pad mask
  * @q: the request queue for the device
  * @mask: pad mask
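The patch above only adds the setters and stacking helpers, so here is a hedged sketch of how a low-level driver might call them; the function name and every numeric value are illustrative assumptions, not taken from a real driver.

/*
 * Hypothetical driver sketch (not part of the patch): describing a
 * drive with 4KB physical sectors behind a 512-byte logical interface
 * using the setters added above.  All values are illustrative.
 */
#include <linux/blkdev.h>

static void example_set_topology(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 512);	/* smallest addressable unit */
	blk_queue_physical_block_size(q, 4096);	/* smallest write without RMW */
	blk_queue_io_min(q, 4096);		/* preferred minimum I/O size */
	blk_queue_io_opt(q, 0);			/* drive reports no optimal size */

	/*
	 * Assume the device reports that its first logical sector sits
	 * 3584 bytes past a physical block boundary.
	 */
	blk_queue_alignment_offset(q, 3584);
}

A stacking driver (MD/DM) would instead call disk_stack_limits() for each component device so that the combined queue_limits and alignment offset are propagated to the top-level gendisk.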
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 3ccdadb8e204..9337e17f9110 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -105,6 +105,21 @@ static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page
 	return queue_var_show(queue_logical_block_size(q), page);
 }
 
+static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(queue_physical_block_size(q), page);
+}
+
+static ssize_t queue_io_min_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(queue_io_min(q), page);
+}
+
+static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(queue_io_opt(q), page);
+}
+
 static ssize_t
 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 {
@@ -257,6 +272,21 @@ static struct queue_sysfs_entry queue_logical_block_size_entry = {
 	.show = queue_logical_block_size_show,
 };
 
+static struct queue_sysfs_entry queue_physical_block_size_entry = {
+	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
+	.show = queue_physical_block_size_show,
+};
+
+static struct queue_sysfs_entry queue_io_min_entry = {
+	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
+	.show = queue_io_min_show,
+};
+
+static struct queue_sysfs_entry queue_io_opt_entry = {
+	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
+	.show = queue_io_opt_show,
+};
+
 static struct queue_sysfs_entry queue_nonrot_entry = {
 	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_nonrot_show,
@@ -289,6 +319,9 @@ static struct attribute *default_attrs[] = {
 	&queue_iosched_entry.attr,
 	&queue_hw_sector_size_entry.attr,
 	&queue_logical_block_size_entry.attr,
+	&queue_physical_block_size_entry.attr,
+	&queue_io_min_entry.attr,
+	&queue_io_opt_entry.attr,
 	&queue_nonrot_entry.attr,
 	&queue_nomerges_entry.attr,
 	&queue_rq_affinity_entry.attr,
diff --git a/block/genhd.c b/block/genhd.c
index 1a4916e01732..fe7ccc0a618f 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -852,11 +852,21 @@ static ssize_t disk_capability_show(struct device *dev,
 	return sprintf(buf, "%x\n", disk->flags);
 }
 
+static ssize_t disk_alignment_offset_show(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct gendisk *disk = dev_to_disk(dev);
+
+	return sprintf(buf, "%d\n", queue_alignment_offset(disk->queue));
+}
+
 static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
 static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL);
 static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL);
 static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL);
 static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
+static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL);
 static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
 #ifdef CONFIG_FAIL_MAKE_REQUEST
@@ -875,6 +885,7 @@ static struct attribute *disk_attrs[] = {
 	&dev_attr_removable.attr,
 	&dev_attr_ro.attr,
 	&dev_attr_size.attr,
+	&dev_attr_alignment_offset.attr,
 	&dev_attr_capability.attr,
 	&dev_attr_stat.attr,
 #ifdef CONFIG_FAIL_MAKE_REQUEST
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 99e33ef40be4..0af36085eb28 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -219,6 +219,13 @@ ssize_t part_size_show(struct device *dev,
 	return sprintf(buf, "%llu\n",(unsigned long long)p->nr_sects);
 }
 
+ssize_t part_alignment_offset_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct hd_struct *p = dev_to_part(dev);
+	return sprintf(buf, "%llu\n", (unsigned long long)p->alignment_offset);
+}
+
 ssize_t part_stat_show(struct device *dev,
 		       struct device_attribute *attr, char *buf)
 {
@@ -272,6 +279,7 @@ ssize_t part_fail_store(struct device *dev,
 static DEVICE_ATTR(partition, S_IRUGO, part_partition_show, NULL);
 static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL);
 static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
+static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL);
 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 static struct device_attribute dev_attr_fail =
@@ -282,6 +290,7 @@ static struct attribute *part_attrs[] = {
 	&dev_attr_partition.attr,
 	&dev_attr_start.attr,
 	&dev_attr_size.attr,
+	&dev_attr_alignment_offset.attr,
 	&dev_attr_stat.attr,
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 	&dev_attr_fail.attr,
@@ -383,6 +392,7 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
 	pdev = part_to_dev(p);
 
 	p->start_sect = start;
+	p->alignment_offset = queue_sector_alignment_offset(disk->queue, start);
 	p->nr_sects = len;
 	p->partno = partno;
 	p->policy = get_disk_ro(disk);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b7bb6fdba12c..5e740a135e73 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -314,11 +314,16 @@ struct queue_limits {
 	unsigned int		max_hw_sectors;
 	unsigned int		max_sectors;
 	unsigned int		max_segment_size;
+	unsigned int		physical_block_size;
+	unsigned int		alignment_offset;
+	unsigned int		io_min;
+	unsigned int		io_opt;
 
 	unsigned short		logical_block_size;
 	unsigned short		max_hw_segments;
 	unsigned short		max_phys_segments;
 
+	unsigned char		misaligned;
 	unsigned char		no_cluster;
 };
 
@@ -911,6 +916,15 @@ extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_alignment_offset(struct request_queue *q,
+				       unsigned int alignment);
+extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
+extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
+extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+			    sector_t offset);
+extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
+			      sector_t offset);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
 extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
@@ -1047,6 +1061,39 @@ static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
 	return queue_logical_block_size(bdev_get_queue(bdev));
 }
 
+static inline unsigned int queue_physical_block_size(struct request_queue *q)
+{
+	return q->limits.physical_block_size;
+}
+
+static inline unsigned int queue_io_min(struct request_queue *q)
+{
+	return q->limits.io_min;
+}
+
+static inline unsigned int queue_io_opt(struct request_queue *q)
+{
+	return q->limits.io_opt;
+}
+
+static inline int queue_alignment_offset(struct request_queue *q)
+{
+	if (q && q->limits.misaligned)
+		return -1;
+
+	if (q && q->limits.alignment_offset)
+		return q->limits.alignment_offset;
+
+	return 0;
+}
+
+static inline int queue_sector_alignment_offset(struct request_queue *q,
+						sector_t sector)
+{
+	return ((sector << 9) - q->limits.alignment_offset)
+		& (q->limits.io_min - 1);
+}
+
 static inline int queue_dma_alignment(struct request_queue *q)
 {
 	return q ? q->dma_alignment : 511;
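To make queue_sector_alignment_offset() above concrete, here is a small standalone worked example with assumed values (io_min of 4096 bytes, zero device alignment offset); the helper below simply mirrors the inline function in userspace.

/*
 * Worked example (assumed values, not from the patch): the alignment
 * offset reported for a legacy DOS partition starting at sector 63 on
 * a disk with 4KB physical blocks and no device alignment offset.
 */
#include <assert.h>

static unsigned int sector_alignment_offset(unsigned long long sector,
					    unsigned int alignment_offset,
					    unsigned int io_min)
{
	/* Mirrors the inline helper: ((sector << 9) - offset) & (io_min - 1) */
	return ((sector << 9) - alignment_offset) & (io_min - 1);
}

int main(void)
{
	/* 63 * 512 = 32256 bytes; 32256 mod 4096 = 3584 */
	assert(sector_alignment_offset(63, 0, 4096) == 3584);

	/* A partition starting at sector 64 is naturally aligned. */
	assert(sector_alignment_offset(64, 0, 4096) == 0);

	return 0;
}

This is the value add_partition() stores in hd_struct.alignment_offset and exposes through the partition's alignment_offset attribute.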
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index a1a28caed23d..149fda264c86 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -90,6 +90,7 @@ struct disk_stats {
 struct hd_struct {
 	sector_t start_sect;
 	sector_t nr_sects;
+	sector_t alignment_offset;
 	struct device __dev;
 	struct kobject *holder_dir;
 	int policy, partno;