Diffstat (limited to 'block/blk-sysfs.c'):

 block/blk-sysfs.c | 27 ++++++++++++++++++++-------
 1 file changed, 20 insertions(+), 7 deletions(-)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 0749b89c6885..d935bd859c87 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -66,14 +66,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 
 	if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
 		blk_set_queue_full(q, BLK_RW_SYNC);
-	} else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
+	} else {
 		blk_clear_queue_full(q, BLK_RW_SYNC);
 		wake_up(&rl->wait[BLK_RW_SYNC]);
 	}
 
 	if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
 		blk_set_queue_full(q, BLK_RW_ASYNC);
-	} else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
+	} else {
 		blk_clear_queue_full(q, BLK_RW_ASYNC);
 		wake_up(&rl->wait[BLK_RW_ASYNC]);
 	}
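Note on the hunk above: the dropped "else if" condition was dead logic. Both values are unsigned, so whenever count >= nr_requests is false we have count < nr_requests, which already implies count + 1 <= nr_requests; the branch fired on every else path, making a plain else equivalent. A standalone sketch of the equivalence (illustrative names, not kernel code):

#include <assert.h>

/* If count >= nr is false, then count < nr, hence count + 1 <= nr.
 * count + 1 cannot wrap, because count < nr <= UINT_MAX. */
static void check_equivalence(unsigned int count, unsigned int nr)
{
	if (!(count >= nr))
		assert(count + 1 <= nr);	/* never fires */
}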
@@ -112,9 +112,14 @@ static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
 	return queue_var_show(queue_max_segments(q), (page));
 }
 
+static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(q->limits.max_integrity_segments, (page));
+}
+
 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
 {
-	if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+	if (blk_queue_cluster(q))
 		return queue_var_show(queue_max_segment_size(q), (page));
 
 	return queue_var_show(PAGE_CACHE_SIZE, (page));
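Two things happen in this hunk: a read-only show helper for the new max_integrity_segments limit is added, and the open-coded test_bit(QUEUE_FLAG_CLUSTER, ...) check is replaced by the blk_queue_cluster() helper. In trees of this era the helper reads the cluster setting out of the queue limits; roughly (a sketch from memory, verify against include/linux/blkdev.h in the matching tree):

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}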
@@ -147,7 +152,8 @@ static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
 
 static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
 {
-	return queue_var_show(q->limits.max_discard_sectors << 9, page);
+	return sprintf(page, "%llu\n",
+		       (unsigned long long)q->limits.max_discard_sectors << 9);
 }
 
 static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
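The rewrite above fixes a 32-bit truncation. max_discard_sectors is an unsigned int, so the old expression shifted it left by 9 (sectors to bytes) in 32-bit arithmetic, wrapping once the limit reaches 1 << 23 sectors (4 GiB); queue_var_show() also narrows the value through unsigned long, which is 32 bits on 32-bit builds. Casting to unsigned long long before the shift and printing with %llu preserves the full value. A minimal userspace demonstration of the wrap (illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned int max_discard_sectors = 1u << 23;	/* 4 GiB in 512-byte sectors */

	printf("%u\n", max_discard_sectors << 9);	/* old code: wraps to 0 */
	printf("%llu\n",
	       (unsigned long long)max_discard_sectors << 9);	/* fixed: 4294967296 */
	return 0;
}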
@@ -288,6 +294,11 @@ static struct queue_sysfs_entry queue_max_segments_entry = {
 	.show = queue_max_segments_show,
 };
 
+static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
+	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
+	.show = queue_max_integrity_segments_show,
+};
+
 static struct queue_sysfs_entry queue_max_segment_size_entry = {
 	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
 	.show = queue_max_segment_size_show,
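For context, queue_sysfs_entry pairs a sysfs attribute with show/store callbacks; its definition in blk-sysfs.c is essentially (a sketch, verify against your tree):

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

With mode S_IRUGO and no .store hook, the new attribute is world-readable and read-only.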
@@ -375,6 +386,7 @@ static struct attribute *default_attrs[] = {
 	&queue_max_hw_sectors_entry.attr,
 	&queue_max_sectors_entry.attr,
 	&queue_max_segments_entry.attr,
+	&queue_max_integrity_segments_entry.attr,
 	&queue_max_segment_size_entry.attr,
 	&queue_iosched_entry.attr,
 	&queue_hw_sector_size_entry.attr,
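Adding the entry to default_attrs[] is what makes it visible: a read of /sys/block/<disk>/queue/max_integrity_segments is dispatched through the queue's sysfs ops to the entry's show hook, along the lines of (simplified from queue_attr_show() in blk-sysfs.c; the sysfs_lock handling is elided):

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	if (!entry->show)
		return -EIO;
	return entry->show(q, page);
}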
@@ -487,7 +499,6 @@ int blk_register_queue(struct gendisk *disk)
 {
 	int ret;
 	struct device *dev = disk_to_dev(disk);
-
 	struct request_queue *q = disk->queue;
 
 	if (WARN_ON(!q))
@@ -498,8 +509,10 @@ int blk_register_queue(struct gendisk *disk)
 		return ret;
 
 	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
-	if (ret < 0)
+	if (ret < 0) {
+		blk_trace_remove_sysfs(dev);
 		return ret;
+	}
 
 	kobject_uevent(&q->kobj, KOBJ_ADD);
 
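The added cleanup closes a leak on the error path: blk_trace_init_sysfs(dev) runs earlier in blk_register_queue(), so if kobject_add() then fails, the blktrace attributes were previously left registered. After the patch the function unwinds in reverse order of setup; roughly (condensed from the patched function):

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);	/* undo blk_trace_init_sysfs() */
		return ret;
	}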
@@ -510,7 +523,7 @@ int blk_register_queue(struct gendisk *disk)
 	if (ret) {
 		kobject_uevent(&q->kobj, KOBJ_REMOVE);
 		kobject_del(&q->kobj);
-		blk_trace_remove_sysfs(disk_to_dev(disk));
+		blk_trace_remove_sysfs(dev);
 		kobject_put(&dev->kobj);
 		return ret;
 	}