author	Mike Snitzer <snitzer@redhat.com>	2009-06-22 05:12:34 -0400
committer	Alasdair G Kergon <agk@redhat.com>	2009-06-22 05:12:34 -0400
commit	754c5fc7ebb417b23601a6222a6005cc2e7f2913 (patch)
tree	6c31b055fc26ec541d67fc1123ebaa4b7a8eae7a /include
parent	18d8594dd93a1ae2fafd591ec026e87d743292bf (diff)
dm: calculate queue limits during resume not load
Currently, device-mapper maintains a separate instance of 'struct queue_limits'
for each table of each device.  When the configuration of a device is to be
changed, first its table is loaded and this structure is populated, then the
device is 'resumed' and the calculated queue_limits are applied.

This places restrictions on how userspace may process related devices, where it
is often advantageous to 'load' tables for several devices at once before
'resuming' them together.  As the new queue_limits only take effect after the
'resume', if they are changing and one device uses another, the latter must be
'resumed' before the former may be 'loaded'.

This patch moves the calculation of these queue_limits out of the 'load'
operation into 'resume'.  Since we are no longer pre-calculating this struct,
we no longer need to maintain copies within our dm structs.

dm_set_device_limits() now passes the 'start' of the device's data area
(aka pe_start) as the 'offset' to blk_stack_limits().

init_valid_queue_limits() is replaced by blk_set_default_limits().

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: martin.petersen@oracle.com
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
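To make the new calling convention concrete, here is a minimal, hypothetical sketch
(the actual patch body lives in drivers/md/dm-table.c and is not part of this diff)
of a callback with the new dm_set_device_limits() signature; the name
example_set_device_limits() and the assumption that 'data' carries a
struct queue_limits are illustrative only:

#include <linux/blkdev.h>
#include <linux/device-mapper.h>

/*
 * Hypothetical sketch, not the actual patch body: fold one underlying
 * device's limits into the caller-supplied struct queue_limits ('data'),
 * using the start of the device's data area as the stacking offset.
 */
static int example_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
                                     sector_t start, void *data)
{
        struct queue_limits *limits = data;     /* table-wide limits being built */
        struct request_queue *q = bdev_get_queue(dev->bdev);

        if (unlikely(!q))
                return 0;

        /* 'start' (aka pe_start) is the 'offset' handed to blk_stack_limits() */
        blk_stack_limits(limits, &q->limits, start);

        return 0;
}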
Diffstat (limited to 'include')
-rw-r--r--	include/linux/device-mapper.h	10
1 file changed, 2 insertions(+), 8 deletions(-)
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index deac3b4e5e18..e6bf3b8c7bf2 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -103,7 +103,8 @@ void dm_error(const char *message);
 /*
  * Combine device limits.
  */
-void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev);
+int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
+                         sector_t start, void *data);
 
 struct dm_dev {
         struct block_device *bdev;
@@ -163,7 +164,6 @@ struct dm_target {
         sector_t begin;
         sector_t len;
 
-        /* FIXME: turn this into a mask, and merge with queue_limits */
         /* Always a power of 2 */
         sector_t split_io;
 
@@ -177,12 +177,6 @@ struct dm_target {
          */
         unsigned num_flush_requests;
 
-        /*
-         * These are automatically filled in by
-         * dm_table_get_device.
-         */
-        struct queue_limits limits;
-
         /* target specific data */
         void *private;
 
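For context, a hedged sketch of how a resume-time recalculation might drive the new
callback through each target's iterate_devices method; example_calculate_queue_limits()
is an illustrative name, and the real dm_calculate_queue_limits() in
drivers/md/dm-table.c is more involved:

#include <linux/blkdev.h>
#include <linux/device-mapper.h>

/*
 * Illustrative sketch only: at 'resume' time the table-wide limits are
 * rebuilt from scratch rather than read from a pre-computed copy kept in
 * struct dm_target.
 */
static void example_calculate_queue_limits(struct dm_table *table,
                                           struct queue_limits *limits)
{
        unsigned int i;

        /* blk_set_default_limits() replaces init_valid_queue_limits() */
        blk_set_default_limits(limits);

        for (i = 0; i < dm_table_get_num_targets(table); i++) {
                struct dm_target *ti = dm_table_get_target(table, i);

                /*
                 * Each target folds its underlying devices' limits in via
                 * the new callback-style dm_set_device_limits().
                 */
                if (ti->type->iterate_devices)
                        ti->type->iterate_devices(ti, dm_set_device_limits,
                                                  limits);
        }
}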