aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKiyoshi Ueda <k-ueda@ct.jp.nec.com>2009-01-05 22:05:07 -0500
committerAlasdair G Kergon <agk@redhat.com>2009-01-05 22:05:07 -0500
commit7d76345da6ed3927c9cbf5d3f7a7021e8bba7374 (patch)
treed0b470dd1a55dfffb27ffa012e4a5afebd133495
parent8fbf26ad5b16ad3a826ca7fe3e86700420abed1f (diff)
dm request: extend target interface
This patch adds the following target interfaces for request-based dm:

  map_rq    : for mapping a request
  rq_end_io : for finishing a request
  busy      : for avoiding performance regression from bio-based dm.
              Target can tell dm core not to map requests now, and that
              may help requests in the block layer queue to be bigger
              by I/O merging.

In bio-based dm, this behavior is done by device drivers managing the block layer queue. But in request-based dm, dm core has to do that since dm core manages the block layer queue.

Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
-rw-r--r--include/linux/device-mapper.h15
1 files changed, 15 insertions, 0 deletions
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 89ff2df40240..c1ba76c7c0e5 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -45,6 +45,8 @@ typedef void (*dm_dtr_fn) (struct dm_target *ti);
45 */ 45 */
46typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio, 46typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio,
47 union map_info *map_context); 47 union map_info *map_context);
48typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
49 union map_info *map_context);
48 50
49/* 51/*
50 * Returns: 52 * Returns:
@@ -57,6 +59,9 @@ typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio,
57typedef int (*dm_endio_fn) (struct dm_target *ti, 59typedef int (*dm_endio_fn) (struct dm_target *ti,
58 struct bio *bio, int error, 60 struct bio *bio, int error,
59 union map_info *map_context); 61 union map_info *map_context);
62typedef int (*dm_request_endio_fn) (struct dm_target *ti,
63 struct request *clone, int error,
64 union map_info *map_context);
60 65
61typedef void (*dm_flush_fn) (struct dm_target *ti); 66typedef void (*dm_flush_fn) (struct dm_target *ti);
62typedef void (*dm_presuspend_fn) (struct dm_target *ti); 67typedef void (*dm_presuspend_fn) (struct dm_target *ti);
@@ -75,6 +80,13 @@ typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
75typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm, 80typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
76 struct bio_vec *biovec, int max_size); 81 struct bio_vec *biovec, int max_size);
77 82
83/*
84 * Returns:
85 * 0: The target can handle the next I/O immediately.
86 * 1: The target can't handle the next I/O immediately.
87 */
88typedef int (*dm_busy_fn) (struct dm_target *ti);
89
78void dm_error(const char *message); 90void dm_error(const char *message);
79 91
80/* 92/*
@@ -107,7 +119,9 @@ struct target_type {
107 dm_ctr_fn ctr; 119 dm_ctr_fn ctr;
108 dm_dtr_fn dtr; 120 dm_dtr_fn dtr;
109 dm_map_fn map; 121 dm_map_fn map;
122 dm_map_request_fn map_rq;
110 dm_endio_fn end_io; 123 dm_endio_fn end_io;
124 dm_request_endio_fn rq_end_io;
111 dm_flush_fn flush; 125 dm_flush_fn flush;
112 dm_presuspend_fn presuspend; 126 dm_presuspend_fn presuspend;
113 dm_postsuspend_fn postsuspend; 127 dm_postsuspend_fn postsuspend;
@@ -117,6 +131,7 @@ struct target_type {
117 dm_message_fn message; 131 dm_message_fn message;
118 dm_ioctl_fn ioctl; 132 dm_ioctl_fn ioctl;
119 dm_merge_fn merge; 133 dm_merge_fn merge;
134 dm_busy_fn busy;
120}; 135};
121 136
122struct io_restrictions { 137struct io_restrictions {