aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/ABI/testing/sysfs-block-dm14
-rw-r--r--drivers/md/dm-sysfs.c2
-rw-r--r--drivers/md/dm.c57
-rw-r--r--drivers/md/dm.h4
4 files changed, 73 insertions, 4 deletions
diff --git a/Documentation/ABI/testing/sysfs-block-dm b/Documentation/ABI/testing/sysfs-block-dm
index 87ca5691e29b..ac4b6fe245d9 100644
--- a/Documentation/ABI/testing/sysfs-block-dm
+++ b/Documentation/ABI/testing/sysfs-block-dm
@@ -23,3 +23,17 @@ Description:	Device-mapper device suspend state.
 		Contains the value 1 while the device is suspended.
 		Otherwise it contains 0. Read-only attribute.
 Users:		util-linux, device-mapper udev rules
+
+What:		/sys/block/dm-<num>/dm/rq_based_seq_io_merge_deadline
+Date:		March 2015
+KernelVersion:	4.1
+Contact:	dm-devel@redhat.com
+Description:	Allow control over how long a request that is a
+		reasonable merge candidate can be queued on the request
+		queue.  The resolution of this deadline is in
+		microseconds (ranging from 1 to 100000 usecs).
+		Setting this attribute to 0 (the default) will disable
+		request-based DM's merge heuristic and associated extra
+		accounting.  This attribute is not applicable to
+		bio-based DM devices so it will only ever report 0 for
+		them.
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index 1271c31709fd..f5bb3944f75e 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -92,11 +92,13 @@ static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf)
 static DM_ATTR_RO(name);
 static DM_ATTR_RO(uuid);
 static DM_ATTR_RO(suspended);
+static DM_ATTR_RW(rq_based_seq_io_merge_deadline);
 
 static struct attribute *dm_attrs[] = {
 	&dm_attr_name.attr,
 	&dm_attr_uuid.attr,
 	&dm_attr_suspended.attr,
+	&dm_attr_rq_based_seq_io_merge_deadline.attr,
 	NULL,
 };
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 2ae78b31e4c0..5294e016e92b 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -21,6 +21,7 @@
 #include <linux/delay.h>
 #include <linux/wait.h>
 #include <linux/kthread.h>
+#include <linux/ktime.h>
 #include <linux/elevator.h> /* for rq_end_sector() */
 
 #include <trace/events/block.h>
@@ -219,8 +220,10 @@ struct mapped_device {
 	struct task_struct *kworker_task;
 
 	/* for request-based merge heuristic in dm_request_fn() */
-	sector_t last_rq_pos;
+	unsigned seq_rq_merge_deadline_usecs;
 	int last_rq_rw;
+	sector_t last_rq_pos;
+	ktime_t last_rq_start_time;
 };
 
 /*
@@ -1935,8 +1938,11 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
 	blk_start_request(orig);
 	atomic_inc(&md->pending[rq_data_dir(orig)]);
 
-	md->last_rq_pos = rq_end_sector(orig);
-	md->last_rq_rw = rq_data_dir(orig);
+	if (md->seq_rq_merge_deadline_usecs) {
+		md->last_rq_pos = rq_end_sector(orig);
+		md->last_rq_rw = rq_data_dir(orig);
+		md->last_rq_start_time = ktime_get();
+	}
 
 	/*
 	 * Hold the md reference here for the in-flight I/O.
@@ -1948,6 +1954,45 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
 	dm_get(md);
 }
 
+#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000
+
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
+{
+	return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
+}
+
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
+						     const char *buf, size_t count)
+{
+	unsigned deadline;
+
+	if (!dm_request_based(md))
+		return count;
+
+	if (kstrtouint(buf, 10, &deadline))
+		return -EINVAL;
+
+	if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
+		deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;
+
+	md->seq_rq_merge_deadline_usecs = deadline;
+
+	return count;
+}
+
+static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md)
+{
+	ktime_t kt_deadline;
+
+	if (!md->seq_rq_merge_deadline_usecs)
+		return false;
+
+	kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
+	kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);
+
+	return !ktime_after(ktime_get(), kt_deadline);
+}
+
 /*
  * q->request_fn for request-based dm.
  * Called with the queue lock held.
@@ -1990,7 +2035,8 @@ static void dm_request_fn(struct request_queue *q)
 			continue;
 		}
 
-		if (md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
+		if (dm_request_peeked_before_merge_deadline(md) &&
+		    md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
 		    md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq))
 			goto delay_and_out;
 
@@ -2532,6 +2578,9 @@ static int dm_init_request_based_queue(struct mapped_device *md)
 	if (!q)
 		return 0;
 
+	/* disable dm_request_fn's merge heuristic by default */
+	md->seq_rq_merge_deadline_usecs = 0;
+
 	md->queue = q;
 	dm_init_md_queue(md);
 	blk_queue_softirq_done(md->queue, dm_softirq_done);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index db495863fa5f..5522422cc6c4 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -234,4 +234,8 @@ static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen
 	return !maxlen || strlen(result) + 1 >= maxlen;
 }
 
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf);
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
+						     const char *buf, size_t count);
+
 #endif