author     Jens Axboe <jens.axboe@oracle.com>  2008-09-13 14:26:01 -0400
committer  Jens Axboe <jens.axboe@oracle.com>  2008-10-09 02:56:09 -0400
commit     c7c22e4d5c1fdebfac4dba76de7d0338c2b0d832 (patch)
tree       ecc3d2517b3471ccc35d4cb4e3b48d4b57205061 /block/blk-sysfs.c
parent     18887ad910e56066233a07fd3cfb2fa11338b782 (diff)
block: add support for IO CPU affinity
This patch adds support for controlling the IO completion CPU of either all
requests on a queue, or on a per-request basis. We export a sysfs variable
(rq_affinity) which, if set, migrates the completion of a request to the CPU
that originally submitted it. A bio helper (bio_set_completion_cpu()) is also
added, so that queuers can ask for completion on a specific CPU.

In testing, this has been shown to cut system time by as much as 20-40% on
synthetic workloads where CPU affinity is desired.

This requires a little help from the architecture, so it'll only work as
designed for archs that are using the new generic smp helper infrastructure.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
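For illustration, here is a queuer-side sketch of the per-request interface
described above. This is not code from the patch: it assumes
bio_set_completion_cpu() takes the bio and a CPU number, and
submit_with_affinity() is a hypothetical wrapper name.

#include <linux/bio.h>
#include <linux/smp.h>

/* Hypothetical wrapper: ask that @bio be completed on the submitting CPU. */
static void submit_with_affinity(struct bio *bio)
{
	unsigned int cpu = get_cpu();		/* current CPU, preemption off */

	bio_set_completion_cpu(bio, cpu);	/* assumed helper from this series */
	put_cpu();
	submit_bio(bio->bi_rw, bio);		/* queue the IO as usual */
}

The queue-wide alternative is the rq_affinity sysfs file added below, which
needs no code changes in the submitter.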
Diffstat (limited to 'block/blk-sysfs.c')
-rw-r--r--  block/blk-sysfs.c | 31
1 file changed, 31 insertions(+), 0 deletions(-)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index b9a6ed16664..21e275d7eed 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -156,6 +156,30 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
 	return ret;
 }
 
+static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
+{
+	unsigned int set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
+
+	return queue_var_show(set != 0, page);
+}
+
+static ssize_t
+queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
+{
+	ssize_t ret = -EINVAL;
+#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+	unsigned long val;
+
+	ret = queue_var_store(&val, page, count);
+	spin_lock_irq(q->queue_lock);
+	if (val)
+		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+	else
+		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
+	spin_unlock_irq(q->queue_lock);
+#endif
+	return ret;
+}
 
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
@@ -197,6 +221,12 @@ static struct queue_sysfs_entry queue_nomerges_entry = {
 	.store = queue_nomerges_store,
 };
 
+static struct queue_sysfs_entry queue_rq_affinity_entry = {
+	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_rq_affinity_show,
+	.store = queue_rq_affinity_store,
+};
+
 static struct attribute *default_attrs[] = {
 	&queue_requests_entry.attr,
 	&queue_ra_entry.attr,
@@ -205,6 +235,7 @@ static struct attribute *default_attrs[] = {
 	&queue_iosched_entry.attr,
 	&queue_hw_sector_size_entry.attr,
 	&queue_nomerges_entry.attr,
+	&queue_rq_affinity_entry.attr,
 	NULL,
 };
 
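Once queue_rq_affinity_entry is wired into default_attrs, the knob appears as
/sys/block/<dev>/queue/rq_affinity. A minimal userspace sketch that enables it
(illustrative only; the device name sda is an assumption):

#include <stdio.h>

int main(void)
{
	/* Any non-zero value sets QUEUE_FLAG_SAME_COMP; zero clears it. */
	FILE *f = fopen("/sys/block/sda/queue/rq_affinity", "w");

	if (!f) {
		perror("rq_affinity");
		return 1;
	}
	fputs("1\n", f);
	fclose(f);
	return 0;
}

Note that on kernels built without CONFIG_USE_GENERIC_SMP_HELPERS the store
path above returns -EINVAL, so this write fails and the flag is never set.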