author     Linus Torvalds <torvalds@linux-foundation.org>  2009-06-11 13:52:27 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-06-11 14:10:35 -0400
commit     c9059598ea8981d02356eead3188bf7fa4d717b8 (patch)
tree       03e73b20a30e988da7c6a3e0ad93b2dc5843274d /block/blk-sysfs.c
parent     0a33f80a8373eca7f4bea3961d1346c3815fa5ed (diff)
parent     b0fd271d5fba0b2d00888363f3869e3f9b26caa9 (diff)
Merge branch 'for-2.6.31' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.31' of git://git.kernel.dk/linux-2.6-block: (153 commits)
block: add request clone interface (v2)
floppy: fix hibernation
ramdisk: remove long-deprecated "ramdisk=" boot-time parameter
fs/bio.c: add missing __user annotation
block: prevent possible io_context->refcount overflow
Add serial number support for virtio_blk, V4a
block: Add missing bounce_pfn stacking and fix comments
Revert "block: Fix bounce limit setting in DM"
cciss: decode unit attention in SCSI error handling code
cciss: Remove no longer needed sendcmd reject processing code
cciss: change SCSI error handling routines to work with interrupts enabled.
cciss: separate error processing and command retrying code in sendcmd_withirq_core()
cciss: factor out fix target status processing code from sendcmd functions
cciss: simplify interface of sendcmd() and sendcmd_withirq()
cciss: factor out core of sendcmd_withirq() for use by SCSI error handling code
cciss: Use schedule_timeout_uninterruptible in SCSI error handling code
block: needs to set the residual length of a bidi request
Revert "block: implement blkdev_readpages"
block: Fix bounce limit setting in DM
Removed reference to non-existing file Documentation/PCI/PCI-DMA-mapping.txt
...
Manually fix conflicts with tracing updates in:
block/blk-sysfs.c
drivers/ide/ide-atapi.c
drivers/ide/ide-cd.c
drivers/ide/ide-floppy.c
drivers/ide/ide-tape.c
include/trace/events/block.h
kernel/trace/blktrace.c
Diffstat (limited to 'block/blk-sysfs.c')
-rw-r--r--  block/blk-sysfs.c  62
1 file changed, 50 insertions(+), 12 deletions(-)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 26f9ec28f56c..b1cd04087d6a 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -95,21 +95,36 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
 
 static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
 {
-        int max_sectors_kb = q->max_sectors >> 1;
+        int max_sectors_kb = queue_max_sectors(q) >> 1;
 
         return queue_var_show(max_sectors_kb, (page));
 }
 
-static ssize_t queue_hw_sector_size_show(struct request_queue *q, char *page)
+static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
 {
-        return queue_var_show(q->hardsect_size, page);
+        return queue_var_show(queue_logical_block_size(q), page);
+}
+
+static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
+{
+        return queue_var_show(queue_physical_block_size(q), page);
+}
+
+static ssize_t queue_io_min_show(struct request_queue *q, char *page)
+{
+        return queue_var_show(queue_io_min(q), page);
+}
+
+static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
+{
+        return queue_var_show(queue_io_opt(q), page);
 }
 
 static ssize_t
 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 {
         unsigned long max_sectors_kb,
-                max_hw_sectors_kb = q->max_hw_sectors >> 1,
+                max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
                         page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
         ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
 
@@ -117,7 +132,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
                 return -EINVAL;
 
         spin_lock_irq(q->queue_lock);
-        q->max_sectors = max_sectors_kb << 1;
+        blk_queue_max_sectors(q, max_sectors_kb << 1);
         spin_unlock_irq(q->queue_lock);
 
         return ret;
@@ -125,7 +140,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 
 static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
 {
-        int max_hw_sectors_kb = q->max_hw_sectors >> 1;
+        int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
 
         return queue_var_show(max_hw_sectors_kb, (page));
 }
@@ -249,7 +264,27 @@ static struct queue_sysfs_entry queue_iosched_entry = {
 
 static struct queue_sysfs_entry queue_hw_sector_size_entry = {
         .attr = {.name = "hw_sector_size", .mode = S_IRUGO },
-        .show = queue_hw_sector_size_show,
+        .show = queue_logical_block_size_show,
+};
+
+static struct queue_sysfs_entry queue_logical_block_size_entry = {
+        .attr = {.name = "logical_block_size", .mode = S_IRUGO },
+        .show = queue_logical_block_size_show,
+};
+
+static struct queue_sysfs_entry queue_physical_block_size_entry = {
+        .attr = {.name = "physical_block_size", .mode = S_IRUGO },
+        .show = queue_physical_block_size_show,
+};
+
+static struct queue_sysfs_entry queue_io_min_entry = {
+        .attr = {.name = "minimum_io_size", .mode = S_IRUGO },
+        .show = queue_io_min_show,
+};
+
+static struct queue_sysfs_entry queue_io_opt_entry = {
+        .attr = {.name = "optimal_io_size", .mode = S_IRUGO },
+        .show = queue_io_opt_show,
 };
 
 static struct queue_sysfs_entry queue_nonrot_entry = {
@@ -283,6 +318,10 @@ static struct attribute *default_attrs[] = {
         &queue_max_sectors_entry.attr,
         &queue_iosched_entry.attr,
         &queue_hw_sector_size_entry.attr,
+        &queue_logical_block_size_entry.attr,
+        &queue_physical_block_size_entry.attr,
+        &queue_io_min_entry.attr,
+        &queue_io_opt_entry.attr,
         &queue_nonrot_entry.attr,
         &queue_nomerges_entry.attr,
         &queue_rq_affinity_entry.attr,
@@ -394,16 +433,15 @@ int blk_register_queue(struct gendisk *disk)
         if (ret)
                 return ret;
 
-        if (!q->request_fn)
-                return 0;
-
-        ret = kobject_add(&q->kobj, kobject_get(&dev->kobj),
-                          "%s", "queue");
+        ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
         if (ret < 0)
                 return ret;
 
         kobject_uevent(&q->kobj, KOBJ_ADD);
 
+        if (!q->request_fn)
+                return 0;
+
         ret = elv_register_queue(q);
         if (ret) {
                 kobject_uevent(&q->kobj, KOBJ_REMOVE);
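
For context only, and not part of the commit itself: a minimal userspace sketch of how the queue limits exported by the new sysfs entries above might be read. The device name "sda", the buffer sizes, and the bare-bones error handling are assumptions for illustration; only the attribute file names ("logical_block_size", "physical_block_size", "minimum_io_size", "optimal_io_size") come directly from the entries added in this patch.

/*
 * Sketch: dump the I/O topology attributes under /sys/block/<dev>/queue/
 * that this patch wires up.  Assumes a kernel carrying these changes and
 * a block device named "sda"; adjust for your system.
 */
#include <stdio.h>

static long read_queue_attr(const char *dev, const char *attr)
{
        char path[256];
        long value = -1;
        FILE *f;

        snprintf(path, sizeof(path), "/sys/block/%s/queue/%s", dev, attr);
        f = fopen(path, "r");
        if (!f)
                return -1;              /* attribute missing or unreadable */
        if (fscanf(f, "%ld", &value) != 1)
                value = -1;
        fclose(f);
        return value;
}

int main(void)
{
        const char *dev = "sda";        /* assumption: pick any block device */
        const char *attrs[] = {
                "logical_block_size",   /* smallest addressable unit */
                "physical_block_size",  /* smallest internal write unit */
                "minimum_io_size",      /* preferred minimum I/O granularity */
                "optimal_io_size",      /* preferred streaming I/O size */
        };
        unsigned int i;

        for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
                printf("%s: %s = %ld bytes\n", dev, attrs[i],
                       read_queue_attr(dev, attrs[i]));
        return 0;
}

The values these files report correspond to what queue_logical_block_size(), queue_physical_block_size(), queue_io_min() and queue_io_opt() return inside the kernel, as wired up by the show functions in the diff above.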