about summary refs log tree commit diff stats
path: root/block/blk-sysfs.c
diff options
context:
space:
mode:
authorAlan D. Brunelle <Alan.Brunelle@hp.com>2008-04-29 08:44:19 -0400
committerJens Axboe <jens.axboe@oracle.com>2008-04-29 08:48:55 -0400
commitac9fafa1243640349aa481adf473db283a695766 (patch)
tree155c2371cca8971638d781269f39fa015bc6509c /block/blk-sysfs.c
parentd7e3c3249ef23b4617393c69fe464765b4ff1645 (diff)
block: Skip I/O merges when disabled
The block I/O + elevator + I/O scheduler code spend a lot of time trying to merge I/Os -- rightfully so under "normal" circumstances. However, if one were to know that the incoming I/O stream was /very/ random in nature, the cycles are wasted. This patch adds a per-request_queue tunable that (when set) disables merge attempts (beyond the simple one-hit cache check), thus freeing up a non-trivial amount of CPU cycles. Signed-off-by: Alan D. Brunelle <alan.brunelle@hp.com> Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block/blk-sysfs.c')
-rw-r--r--block/blk-sysfs.c26
1 file changed, 26 insertions(+), 0 deletions(-)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index fc41d83be22b..e85c4013e8a2 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -135,6 +135,25 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
135 return queue_var_show(max_hw_sectors_kb, (page)); 135 return queue_var_show(max_hw_sectors_kb, (page));
136} 136}
137 137
138static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
139{
140 return queue_var_show(blk_queue_nomerges(q), page);
141}
142
143static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
144 size_t count)
145{
146 unsigned long nm;
147 ssize_t ret = queue_var_store(&nm, page, count);
148
149 if (nm)
150 set_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
151 else
152 clear_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
153
154 return ret;
155}
156
138 157
139static struct queue_sysfs_entry queue_requests_entry = { 158static struct queue_sysfs_entry queue_requests_entry = {
140 .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR }, 159 .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
@@ -170,6 +189,12 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = {
170 .show = queue_hw_sector_size_show, 189 .show = queue_hw_sector_size_show,
171}; 190};
172 191
192static struct queue_sysfs_entry queue_nomerges_entry = {
193 .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
194 .show = queue_nomerges_show,
195 .store = queue_nomerges_store,
196};
197
173static struct attribute *default_attrs[] = { 198static struct attribute *default_attrs[] = {
174 &queue_requests_entry.attr, 199 &queue_requests_entry.attr,
175 &queue_ra_entry.attr, 200 &queue_ra_entry.attr,
@@ -177,6 +202,7 @@ static struct attribute *default_attrs[] = {
177 &queue_max_sectors_entry.attr, 202 &queue_max_sectors_entry.attr,
178 &queue_iosched_entry.attr, 203 &queue_iosched_entry.attr,
179 &queue_hw_sector_size_entry.attr, 204 &queue_hw_sector_size_entry.attr,
205 &queue_nomerges_entry.attr,
180 NULL, 206 NULL,
181}; 207};
182 208