author		Mike Snitzer <snitzer@redhat.com>	2015-02-26 00:50:28 -0500
committer	Mike Snitzer <snitzer@redhat.com>	2015-04-15 12:10:15 -0400
commit		0ce65797a77ee780f62909d3128bf08b9735718b (patch)
tree		cd83882e699392fafdd7cee5f9f34c6648f54555
parent		b898320d683d54c2bc17b748b9742d2b601ad453 (diff)
dm: impose configurable deadline for dm_request_fn's merge heuristic
Otherwise, for sequential workloads, the dm_request_fn can allow excessive request merging at the expense of increased service time.

Add a per-device sysfs attribute to allow the user to control how long a request that is a reasonable merge candidate can be queued on the request queue.  The resolution of this request dispatch deadline is in microseconds (ranging from 1 to 100000 usecs).  To set a 20us deadline:

  echo 20 > /sys/block/dm-7/dm/rq_based_seq_io_merge_deadline

The dm_request_fn's merge heuristic and associated extra accounting are disabled by default (rq_based_seq_io_merge_deadline is 0).  This sysfs attribute is not applicable to bio-based DM devices, so it will only ever report 0 for them.

Allowing a request to remain on the queue blocks other requests behind it.  But introducing a short dequeue delay has proven very effective at enabling certain sequential IO workloads on really fast, yet IOPS-constrained, devices to build up slightly larger IOs -- yielding 90+% throughput improvements.  Having precise control over the time taken to wait for larger requests to build affords control beyond that of waiting for certain IO sizes to accumulate (which would require a deadline anyway).  This knob only makes sense with sequential IO workloads, and the particular value used is specific to the storage configuration.  Given the expected niche use-case for when this knob is useful, it has been deemed acceptable to expose this relatively crude method for crafting optimal IO on specific storage -- especially given the solution is simple yet effective.

In the context of DM multipath, it is advisable to tune this sysfs attribute to a value that offers the best performance for the common case (e.g. if 4 paths are expected active, tune for that; if paths fail, performance may be slightly reduced).  Alternatives were explored to have request-based DM autotune this value (e.g. if/when paths fail), but they were quickly deemed too fragile and complex to warrant further design and development time.  If this problem proves more common as faster storage emerges, we'll have to look at elevating a generic solution into the block core.

Tested-by: Shiva Krishna Merla <shivakrishna.merla@netapp.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
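For illustration only (not part of the patch): a minimal user-space sketch that sets and reads back the deadline through the sysfs attribute.  The dm-7 path follows the example in the message above; substitute the real dm-<num> for your device.

	/* set_merge_deadline.c -- hypothetical helper, sketch only.
	 * Writes a microsecond deadline to the per-device sysfs attribute
	 * described above, then reads back the effective value (bio-based
	 * devices always report 0).
	 */
	#include <stdio.h>
	#include <stdlib.h>

	int main(int argc, char **argv)
	{
		const char *attr =
			"/sys/block/dm-7/dm/rq_based_seq_io_merge_deadline";
		unsigned deadline = 20;	/* usecs; the kernel caps at 100000 */
		FILE *f;

		if (argc > 1)
			deadline = (unsigned)strtoul(argv[1], NULL, 10);

		f = fopen(attr, "w");
		if (!f) {
			perror(attr);
			return 1;
		}
		fprintf(f, "%u\n", deadline);
		fclose(f);

		f = fopen(attr, "r");
		if (!f) {
			perror(attr);
			return 1;
		}
		if (fscanf(f, "%u", &deadline) == 1)
			printf("effective deadline: %u usecs\n", deadline);
		fclose(f);
		return 0;
	}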
-rw-r--r--	Documentation/ABI/testing/sysfs-block-dm	14
-rw-r--r--	drivers/md/dm-sysfs.c				 2
-rw-r--r--	drivers/md/dm.c					57
-rw-r--r--	drivers/md/dm.h					 4
4 files changed, 73 insertions(+), 4 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-block-dm b/Documentation/ABI/testing/sysfs-block-dm
index 87ca5691e29b..ac4b6fe245d9 100644
--- a/Documentation/ABI/testing/sysfs-block-dm
+++ b/Documentation/ABI/testing/sysfs-block-dm
@@ -23,3 +23,17 @@ Description:	Device-mapper device suspend state.
 		Contains the value 1 while the device is suspended.
 		Otherwise it contains 0. Read-only attribute.
 Users:		util-linux, device-mapper udev rules
+
+What:		/sys/block/dm-<num>/dm/rq_based_seq_io_merge_deadline
+Date:		March 2015
+KernelVersion:	4.1
+Contact:	dm-devel@redhat.com
+Description:	Allow control over how long a request that is a
+		reasonable merge candidate can be queued on the request
+		queue.  The resolution of this deadline is in
+		microseconds (ranging from 1 to 100000 usecs).
+		Setting this attribute to 0 (the default) will disable
+		request-based DM's merge heuristic and associated extra
+		accounting.  This attribute is not applicable to
+		bio-based DM devices so it will only ever report 0 for
+		them.
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index 1271c31709fd..f5bb3944f75e 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -92,11 +92,13 @@ static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf)
 static DM_ATTR_RO(name);
 static DM_ATTR_RO(uuid);
 static DM_ATTR_RO(suspended);
+static DM_ATTR_RW(rq_based_seq_io_merge_deadline);
 
 static struct attribute *dm_attrs[] = {
 	&dm_attr_name.attr,
 	&dm_attr_uuid.attr,
 	&dm_attr_suspended.attr,
+	&dm_attr_rq_based_seq_io_merge_deadline.attr,
 	NULL,
 };
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 2ae78b31e4c0..5294e016e92b 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -21,6 +21,7 @@
 #include <linux/delay.h>
 #include <linux/wait.h>
 #include <linux/kthread.h>
+#include <linux/ktime.h>
 #include <linux/elevator.h> /* for rq_end_sector() */
 
 #include <trace/events/block.h>
@@ -219,8 +220,10 @@ struct mapped_device {
 	struct task_struct *kworker_task;
 
 	/* for request-based merge heuristic in dm_request_fn() */
-	sector_t last_rq_pos;
+	unsigned seq_rq_merge_deadline_usecs;
 	int last_rq_rw;
+	sector_t last_rq_pos;
+	ktime_t last_rq_start_time;
 };
 
 /*
@@ -1935,8 +1938,11 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
 	blk_start_request(orig);
 	atomic_inc(&md->pending[rq_data_dir(orig)]);
 
-	md->last_rq_pos = rq_end_sector(orig);
-	md->last_rq_rw = rq_data_dir(orig);
+	if (md->seq_rq_merge_deadline_usecs) {
+		md->last_rq_pos = rq_end_sector(orig);
+		md->last_rq_rw = rq_data_dir(orig);
+		md->last_rq_start_time = ktime_get();
+	}
 
 	/*
 	 * Hold the md reference here for the in-flight I/O.
@@ -1948,6 +1954,45 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
 	dm_get(md);
 }
 
+#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000
+
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
+{
+	return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
+}
+
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
+						     const char *buf, size_t count)
+{
+	unsigned deadline;
+
+	if (!dm_request_based(md))
+		return count;
+
+	if (kstrtouint(buf, 10, &deadline))
+		return -EINVAL;
+
+	if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
+		deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;
+
+	md->seq_rq_merge_deadline_usecs = deadline;
+
+	return count;
+}
+
+static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md)
+{
+	ktime_t kt_deadline;
+
+	if (!md->seq_rq_merge_deadline_usecs)
+		return false;
+
+	kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
+	kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);
+
+	return !ktime_after(ktime_get(), kt_deadline);
+}
+
 /*
  * q->request_fn for request-based dm.
  * Called with the queue lock held.
@@ -1990,7 +2035,8 @@ static void dm_request_fn(struct request_queue *q)
 			continue;
 		}
 
-		if (md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
+		if (dm_request_peeked_before_merge_deadline(md) &&
+		    md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
 		    md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq))
 			goto delay_and_out;
 
@@ -2532,6 +2578,9 @@ static int dm_init_request_based_queue(struct mapped_device *md)
 	if (!q)
 		return 0;
 
+	/* disable dm_request_fn's merge heuristic by default */
+	md->seq_rq_merge_deadline_usecs = 0;
+
 	md->queue = q;
 	dm_init_md_queue(md);
 	blk_queue_softirq_done(md->queue, dm_softirq_done);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index db495863fa5f..5522422cc6c4 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -234,4 +234,8 @@ static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen
 	return !maxlen || strlen(result) + 1 >= maxlen;
 }
 
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf);
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
+						     const char *buf, size_t count);
+
 #endif
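
For illustration only: the deadline test in dm_request_peeked_before_merge_deadline() above returns true while "now" still falls within last_rq_start_time + deadline, i.e. while the merge window opened by the previous dispatch is still open.  A rough user-space analogue (a sketch, not from the patch; CLOCK_MONOTONIC nanoseconds stand in for ktime_get(), and the overflow saturation ktime_add_safe() provides is omitted):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	#define NSEC_PER_USEC 1000ULL

	static uint64_t now_ns(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
	}

	/* true while the merge window opened at start_ns is still open */
	static bool before_merge_deadline(uint64_t start_ns, unsigned deadline_usecs)
	{
		if (!deadline_usecs)	/* heuristic disabled */
			return false;
		return now_ns() <= start_ns + deadline_usecs * NSEC_PER_USEC;
	}

	int main(void)
	{
		uint64_t start = now_ns();

		/* immediately after dispatch a 20us window is open ... */
		printf("open: %d\n", before_merge_deadline(start, 20));
		/* ... and a disabled (0) deadline never holds requests back */
		printf("disabled: %d\n", before_merge_deadline(start, 0));
		return 0;
	}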