author		Mike Snitzer <snitzer@redhat.com>	2009-06-22 05:12:34 -0400
committer	Alasdair G Kergon <agk@redhat.com>	2009-06-22 05:12:34 -0400
commit		754c5fc7ebb417b23601a6222a6005cc2e7f2913 (patch)
tree		6c31b055fc26ec541d67fc1123ebaa4b7a8eae7a /drivers/md/dm.h
parent		18d8594dd93a1ae2fafd591ec026e87d743292bf (diff)
dm: calculate queue limits during resume not load
Currently, device-mapper maintains a separate instance of
'struct queue_limits' for each table of each device.  When the
configuration of a device is to be changed, first its table is loaded
and this structure is populated, then the device is 'resumed' and the
calculated queue_limits are applied.

This places restrictions on how userspace may process related devices,
where it is often advantageous to 'load' tables for several devices
at once before 'resuming' them together.  As the new queue_limits
only take effect after the 'resume', if they are changing and one
device uses another, the latter must be 'resumed' before the former
may be 'loaded'.

This patch moves the calculation of these queue_limits out of
the 'load' operation into 'resume'.  Since we are no longer
pre-calculating this struct, we no longer need to maintain copies
within our dm structs.

dm_set_device_limits() now passes the 'start' of the device's
data area (aka pe_start) as the 'offset' to blk_stack_limits().

init_valid_queue_limits() is replaced by blk_set_default_limits().

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: martin.petersen@oracle.com
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
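The dm_set_device_limits() change described above lands in dm-table.c rather than in this header. As a rough, illustrative sketch only (the function and variable names below are assumptions, not the patch's actual code), the stacking it describes folds each underlying device's queue limits into the limits being accumulated for the table, with the start of the device's data area supplied as the offset:

#include <linux/blkdev.h>
#include <linux/device-mapper.h>

/*
 * Illustrative sketch only -- not the code from this patch.
 * Stacks an underlying device's queue limits into the limits being
 * accumulated for the table, using the start of the device's data
 * area (aka pe_start) as the stacking offset.
 */
static void example_stack_dev_limits(struct queue_limits *limits,
				     struct block_device *bdev,
				     sector_t start)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (blk_stack_limits(limits, &q->limits, start) < 0)
		DMWARN("target device is misaligned");
}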
Diffstat (limited to 'drivers/md/dm.h')
-rw-r--r--	drivers/md/dm.h	| 5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index b5935c610c44..604e85caadf6 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -41,7 +41,10 @@ void dm_table_event_callback(struct dm_table *t,
 			     void (*fn)(void *), void *context);
 struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
-void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q);
+int dm_calculate_queue_limits(struct dm_table *table,
+			      struct queue_limits *limits);
+void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+			       struct queue_limits *limits);
 struct list_head *dm_table_get_devices(struct dm_table *t);
 void dm_table_presuspend_targets(struct dm_table *t);
 void dm_table_postsuspend_targets(struct dm_table *t);
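For readers following the API change, here is a minimal sketch of how the two declarations added above are expected to fit together on the resume path. The wrapper function below is hypothetical and exists only for illustration; the real caller lives in drivers/md/dm.c, which is not part of this hunk.

#include <linux/blkdev.h>
#include "dm.h"

/*
 * Hypothetical wrapper, for illustration only: at resume time the
 * queue_limits are first calculated from the table being bound and
 * then applied to the mapped device's request queue.
 */
static int example_apply_table_limits(struct dm_table *t,
				      struct request_queue *q)
{
	struct queue_limits limits;
	int r;

	/* Walk the table's targets/devices and stack their limits. */
	r = dm_calculate_queue_limits(t, &limits);
	if (r)
		return r;

	/* Apply the freshly calculated limits to the queue. */
	dm_table_set_restrictions(t, q, &limits);

	return 0;
}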