author		Kiyoshi Ueda <k-ueda@ct.jp.nec.com>	2009-06-22 05:12:37 -0400
committer	Alasdair G Kergon <agk@redhat.com>	2009-06-22 05:12:37 -0400
commit		523d9297d43cce3fa6de6474b7674329e98743b1 (patch)
tree		b889df9e26458c134a909c9ada51fa961fbf1291 /drivers/md/dm.c
parent		5d67aa2366ccb8257d103d0b43df855605c3c086 (diff)
dm: disable interrupt when taking map_lock
This patch disables interrupts when taking map_lock, to avoid
lockdep warnings in request-based dm.

Request-based dm takes map_lock after taking queue_lock with
interrupts disabled:
  spin_lock_irqsave(queue_lock)
  q->request_fn() == dm_request_fn()
    => dm_get_table()
         => read_lock(map_lock)
while queue_lock could be (but currently isn't) taken in interrupt context.

Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Acked-by: Christof Schmitt <christof.schmitt@de.ibm.com>
Acked-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
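For context, the nesting that lockdep objects to looks roughly like the sketch
below. This is a simplified, hypothetical illustration using the same kernel
locking primitives, not code from dm.c: the names demo_queue_lock, demo_map_lock,
demo_get_table() and demo_request_fn() are made up and stand in for q->queue_lock,
md->map_lock, dm_get_table() and dm_request_fn(). The point is that a plain
read_lock() taken inside a region already covered by spin_lock_irqsave() mixes
irq-unsafe and irq-safe lock usage, which lockdep flags; the _irqsave/_irqrestore
variants used by this patch do not.

/*
 * Simplified, hypothetical sketch of the lock nesting (not dm.c code).
 * Builds as part of a kernel module; only <linux/spinlock.h> is needed.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_queue_lock);	/* stands in for q->queue_lock */
static DEFINE_RWLOCK(demo_map_lock);		/* stands in for md->map_lock  */

static void demo_get_table(void)
{
	unsigned long flags;

	/*
	 * Before the patch this was a plain read_lock(&demo_map_lock),
	 * taken while the caller already holds an irq-disabling spinlock;
	 * lockdep then warns about the irq-safe vs. irq-unsafe mix.
	 */
	read_lock_irqsave(&demo_map_lock, flags);
	/* ... look up and take a reference on the table ... */
	read_unlock_irqrestore(&demo_map_lock, flags);
}

static void demo_request_fn(void)
{
	unsigned long flags;

	/* Block-layer style: queue lock held with interrupts disabled. */
	spin_lock_irqsave(&demo_queue_lock, flags);
	demo_get_table();	/* nested acquisition of the map rwlock */
	spin_unlock_irqrestore(&demo_queue_lock, flags);
}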
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--	drivers/md/dm.c	15
1 file changed, 9 insertions, 6 deletions
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 00c768860818..3c6d4ee8921d 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -512,12 +512,13 @@ static void queue_io(struct mapped_device *md, struct bio *bio)
 struct dm_table *dm_get_table(struct mapped_device *md)
 {
 	struct dm_table *t;
+	unsigned long flags;
 
-	read_lock(&md->map_lock);
+	read_lock_irqsave(&md->map_lock, flags);
 	t = md->map;
 	if (t)
 		dm_table_get(t);
-	read_unlock(&md->map_lock);
+	read_unlock_irqrestore(&md->map_lock, flags);
 
 	return t;
 }
@@ -1910,6 +1911,7 @@ static int __bind(struct mapped_device *md, struct dm_table *t,
 {
 	struct request_queue *q = md->queue;
 	sector_t size;
+	unsigned long flags;
 
 	size = dm_table_get_size(t);
 
@@ -1940,10 +1942,10 @@ static int __bind(struct mapped_device *md, struct dm_table *t,
 
 	__bind_mempools(md, t);
 
-	write_lock(&md->map_lock);
+	write_lock_irqsave(&md->map_lock, flags);
 	md->map = t;
 	dm_table_set_restrictions(t, q, limits);
-	write_unlock(&md->map_lock);
+	write_unlock_irqrestore(&md->map_lock, flags);
 
 	return 0;
 }
@@ -1951,14 +1953,15 @@ static int __bind(struct mapped_device *md, struct dm_table *t,
 static void __unbind(struct mapped_device *md)
 {
 	struct dm_table *map = md->map;
+	unsigned long flags;
 
 	if (!map)
 		return;
 
 	dm_table_event_callback(map, NULL, NULL);
-	write_lock(&md->map_lock);
+	write_lock_irqsave(&md->map_lock, flags);
 	md->map = NULL;
-	write_unlock(&md->map_lock);
+	write_unlock_irqrestore(&md->map_lock, flags);
 	dm_table_destroy(map);
 }
 