author		Alan D. Brunelle <Alan.Brunelle@hp.com>		2008-04-29 08:44:19 -0400
committer	Jens Axboe <jens.axboe@oracle.com>		2008-04-29 08:48:55 -0400
commit		ac9fafa1243640349aa481adf473db283a695766 (patch)
tree		155c2371cca8971638d781269f39fa015bc6509c /block
parent		d7e3c3249ef23b4617393c69fe464765b4ff1645 (diff)
block: Skip I/O merges when disabled
The block I/O + elevator + I/O scheduler code spends a lot of time trying
to merge I/Os -- rightfully so under "normal" circumstances. However, if
the incoming I/O stream is known to be /very/ random in nature, those
cycles are wasted.
This patch adds a per-request_queue tunable that (when set) disables
merge attempts (beyond the simple one-hit cache check), thus freeing up
a non-trivial amount of CPU cycles.
Signed-off-by: Alan D. Brunelle <alan.brunelle@hp.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
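
As a usage illustration (not part of the patch): a minimal userspace C sketch
that flips the new tunable. The "nomerges" attribute name comes from the patch
below; the device name "sda" and the standard /sys/block/<dev>/queue location
are assumptions of the example.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed path; substitute the target device for "sda". */
	const char *path = "/sys/block/sda/queue/nomerges";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Writing "1" sets QUEUE_FLAG_NOMERGES; "0" clears it again. */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}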
Diffstat (limited to 'block')
 block/blk-sysfs.c | 26 ++++++++++++++++++++++++++
 block/elevator.c  |  3 +++
 2 files changed, 29 insertions(+), 0 deletions(-)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index fc41d83be22b..e85c4013e8a2 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -135,6 +135,25 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
 	return queue_var_show(max_hw_sectors_kb, (page));
 }
 
+static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(blk_queue_nomerges(q), page);
+}
+
+static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
+				    size_t count)
+{
+	unsigned long nm;
+	ssize_t ret = queue_var_store(&nm, page, count);
+
+	if (nm)
+		set_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+	else
+		clear_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+
+	return ret;
+}
+
 
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
@@ -170,6 +189,12 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = {
 	.show = queue_hw_sector_size_show,
 };
 
+static struct queue_sysfs_entry queue_nomerges_entry = {
+	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_nomerges_show,
+	.store = queue_nomerges_store,
+};
+
 static struct attribute *default_attrs[] = {
 	&queue_requests_entry.attr,
 	&queue_ra_entry.attr,
@@ -177,6 +202,7 @@ static struct attribute *default_attrs[] = {
 	&queue_max_sectors_entry.attr,
 	&queue_iosched_entry.attr,
 	&queue_hw_sector_size_entry.attr,
+	&queue_nomerges_entry.attr,
 	NULL,
 };
 
diff --git a/block/elevator.c b/block/elevator.c
index 7253fa05db0a..ac5310ef8270 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -488,6 +488,9 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 		}
 	}
 
+	if (blk_queue_nomerges(q))
+		return ELEVATOR_NO_MERGE;
+
 	/*
 	 * See if our hash lookup can find a potential backmerge.
 	 */
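
For context, a simplified userspace model of the merge decision order after
this patch (the types and helpers are illustrative stand-ins, not the
kernel's): the one-hit cache is still consulted first, and only the more
expensive lookups are skipped when nomerges is set.

#include <stdbool.h>
#include <stdio.h>

enum { NO_MERGE, BACK_MERGE };

struct queue_model {
	bool nomerges;       /* stand-in for QUEUE_FLAG_NOMERGES */
	bool last_merge_hit; /* would the one-hit cache match? */
	bool hash_hit;       /* would the hash lookup find a backmerge? */
};

static int elv_merge_model(const struct queue_model *q)
{
	/* 1. The one-hit cache (q->last_merge) is still tried first. */
	if (q->last_merge_hit)
		return BACK_MERGE;
	/* 2. New in this patch: bail out before any further merge work. */
	if (q->nomerges)
		return NO_MERGE;
	/* 3. Otherwise fall through to the hash lookup for a backmerge. */
	return q->hash_hit ? BACK_MERGE : NO_MERGE;
}

int main(void)
{
	struct queue_model q = { .nomerges = true, .hash_hit = true };
	printf("result: %d\n", elv_merge_model(&q)); /* prints 0: no merge */
	return 0;
}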