author		Mike Snitzer <snitzer@redhat.com>	2009-06-22 05:12:34 -0400
committer	Alasdair G Kergon <agk@redhat.com>	2009-06-22 05:12:34 -0400
commit		754c5fc7ebb417b23601a6222a6005cc2e7f2913
tree		6c31b055fc26ec541d67fc1123ebaa4b7a8eae7a /drivers/md/dm.c
parent		18d8594dd93a1ae2fafd591ec026e87d743292bf
dm: calculate queue limits during resume not load
Currently, device-mapper maintains a separate instance of 'struct queue_limits' for each table of each device. When the configuration of a device is to be changed, first its table is loaded and this structure is populated, then the device is 'resumed' and the calculated queue_limits are applied.

This places restrictions on how userspace may process related devices, where it is often advantageous to 'load' tables for several devices at once before 'resuming' them together. As the new queue_limits only take effect after the 'resume', if they are changing and one device uses another, the latter must be 'resumed' before the former may be 'loaded'.

This patch moves the calculation of these queue_limits out of the 'load' operation into 'resume'. Since we are no longer pre-calculating this struct, we no longer need to maintain copies within our dm structs.

dm_set_device_limits() now passes the 'start' of the device's data area (aka pe_start) as the 'offset' to blk_stack_limits().

init_valid_queue_limits() is replaced by blk_set_default_limits().

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: martin.petersen@oracle.com
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
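To make the mechanism concrete, here is a minimal sketch of a resume-time limits calculation: start from the block layer's defaults and fold each underlying device's limits into one 'struct queue_limits'. blk_set_default_limits(), blk_stack_limits() and bdev_get_queue() are the real block-layer helpers of this era; 'struct example_dev' and example_calculate_queue_limits() are hypothetical stand-ins for the table/target iteration that dm-table.c actually performs.

	#include <linux/blkdev.h>

	/* Hypothetical stand-in for one underlying device of a dm table. */
	struct example_dev {
		struct block_device *bdev;
		sector_t start;		/* start of the data area (pe_start) */
	};

	/*
	 * Sketch only: fold each underlying device's limits into *limits,
	 * beginning from the block layer's defaults.  This mirrors the idea
	 * of dm_calculate_queue_limits(), not its exact code.
	 */
	static int example_calculate_queue_limits(struct example_dev *devs,
						  unsigned int count,
						  struct queue_limits *limits)
	{
		unsigned int i;
		int r = 0;

		blk_set_default_limits(limits);

		for (i = 0; i < count; i++) {
			struct request_queue *q = bdev_get_queue(devs[i].bdev);

			/*
			 * Passing the data-area start as the offset lets
			 * blk_stack_limits() evaluate alignment where dm will
			 * actually perform I/O, as the message above notes.
			 */
			if (blk_stack_limits(limits, &q->limits,
					     devs[i].start) < 0)
				r = -EINVAL;	/* conflicting limits */
		}

		return r;
	}

Because the result is computed from a table alone, userspace can now load tables for several related devices and only then resume them together, with each resume deriving fresh limits on the spot.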
Diffstat (limited to 'drivers/md/dm.c')
 drivers/md/dm.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index a9210bb594e7..f609793a92d0 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1313,7 +1313,8 @@ static void __set_size(struct mapped_device *md, sector_t size)
 	mutex_unlock(&md->bdev->bd_inode->i_mutex);
 }
 
-static int __bind(struct mapped_device *md, struct dm_table *t)
+static int __bind(struct mapped_device *md, struct dm_table *t,
+		  struct queue_limits *limits)
 {
 	struct request_queue *q = md->queue;
 	sector_t size;
@@ -1337,7 +1338,7 @@ static int __bind(struct mapped_device *md, struct dm_table *t)
 
 	write_lock(&md->map_lock);
 	md->map = t;
-	dm_table_set_restrictions(t, q);
+	dm_table_set_restrictions(t, q, limits);
 	write_unlock(&md->map_lock);
 
 	return 0;
@@ -1562,6 +1563,7 @@ static void dm_queue_flush(struct mapped_device *md)
  */
 int dm_swap_table(struct mapped_device *md, struct dm_table *table)
 {
+	struct queue_limits limits;
 	int r = -EINVAL;
 
 	mutex_lock(&md->suspend_lock);
@@ -1570,8 +1572,12 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table)
 	if (!dm_suspended(md))
 		goto out;
 
+	r = dm_calculate_queue_limits(table, &limits);
+	if (r)
+		goto out;
+
 	__unbind(md);
-	r = __bind(md, table);
+	r = __bind(md, table, &limits);
 
 out:
 	mutex_unlock(&md->suspend_lock);
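Note the ordering in dm_swap_table(): dm_calculate_queue_limits() runs before __unbind(), so a failure leaves the old table (and its limits) bound and the device untouched. Only once a consistent set of limits exists does __bind() install the new table and hand the pre-computed limits to dm_table_set_restrictions(). The apply side then reduces, in essence, to copying that struct into the queue; a hedged sketch with a hypothetical name, assuming the real dm-table.c function also manages queue flags and other restrictions not shown:

	/*
	 * Hypothetical sketch of the 'apply' step at resume time.  With the
	 * limits already calculated, installing them is a plain copy into
	 * the request queue's limits.
	 */
	static void example_set_restrictions(struct request_queue *q,
					     struct queue_limits *limits)
	{
		q->limits = *limits;
	}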