aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/block
diff options
context:
space:
mode:
authorAlex Elder <elder@dreamhost.com>2012-01-29 14:57:44 -0500
committerAlex Elder <elder@dreamhost.com>2012-03-22 11:47:47 -0400
commite124a82f3c4efc2cc2bae68a2bf30020fb8c4fc2 (patch)
tree9c1b768d2871b181bcc21d89bedf9bf9dbc6d998 /drivers/block
parent1ddbe94eda58597cb6dd464b455cb62d3f68be7b (diff)
rbd: protect the rbd_dev_list with a spinlock
The rbd_dev_list is just a simple list of all the current rbd_devices. Using the ctl_mutex as a concurrency guard is overkill. Instead, use a spinlock for that specific purpose. This also reduces the window that the ctl_mutex needs to be held in rbd_add().

Signed-off-by: Alex Elder <elder@dreamhost.com>
Signed-off-by: Sage Weil <sage@newdream.net>
Diffstat (limited to 'drivers/block')
-rw-r--r--drivers/block/rbd.c33
1 file changed, 22 insertions(+), 11 deletions(-)
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 62da8ccc5fc..e259feedc7d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -174,11 +174,13 @@ static struct bus_type rbd_bus_type = {
174 .name = "rbd", 174 .name = "rbd",
175}; 175};
176 176
177static DEFINE_SPINLOCK(node_lock); /* protects client get/put */
178
179static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */ 177static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */
178
180static LIST_HEAD(rbd_dev_list); /* devices */ 179static LIST_HEAD(rbd_dev_list); /* devices */
180static DEFINE_SPINLOCK(rbd_dev_list_lock);
181
181static LIST_HEAD(rbd_client_list); /* clients */ 182static LIST_HEAD(rbd_client_list); /* clients */
183static DEFINE_SPINLOCK(node_lock); /* protects client get/put */
182 184
183static int __rbd_init_snaps_header(struct rbd_device *rbd_dev); 185static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
184static void rbd_dev_release(struct device *dev); 186static void rbd_dev_release(struct device *dev);
@@ -2209,12 +2211,12 @@ static ssize_t rbd_add(struct bus_type *bus,
2209 init_rwsem(&rbd_dev->header.snap_rwsem); 2211 init_rwsem(&rbd_dev->header.snap_rwsem);
2210 2212
2211 /* generate unique id: one more than highest used so far */ 2213 /* generate unique id: one more than highest used so far */
2212 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2213
2214 rbd_dev->id = rbd_id_get(); 2214 rbd_dev->id = rbd_id_get();
2215 2215
2216 /* add to global list */ 2216 /* add to global list */
2217 spin_lock(&rbd_dev_list_lock);
2217 list_add_tail(&rbd_dev->node, &rbd_dev_list); 2218 list_add_tail(&rbd_dev->node, &rbd_dev_list);
2219 spin_unlock(&rbd_dev_list_lock);
2218 2220
2219 /* parse add command */ 2221 /* parse add command */
2220 if (sscanf(buf, "%" __stringify(RBD_MAX_OPT_LEN) "s " 2222 if (sscanf(buf, "%" __stringify(RBD_MAX_OPT_LEN) "s "
@@ -2238,12 +2240,14 @@ static ssize_t rbd_add(struct bus_type *bus,
2238 2240
2239 /* initialize rest of new object */ 2241 /* initialize rest of new object */
2240 snprintf(rbd_dev->name, DEV_NAME_LEN, DRV_NAME "%d", rbd_dev->id); 2242 snprintf(rbd_dev->name, DEV_NAME_LEN, DRV_NAME "%d", rbd_dev->id);
2243
2244 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2241 rc = rbd_get_client(rbd_dev, mon_dev_name, options); 2245 rc = rbd_get_client(rbd_dev, mon_dev_name, options);
2246 mutex_unlock(&ctl_mutex);
2247
2242 if (rc < 0) 2248 if (rc < 0)
2243 goto err_out_slot; 2249 goto err_out_slot;
2244 2250
2245 mutex_unlock(&ctl_mutex);
2246
2247 /* pick the pool */ 2251 /* pick the pool */
2248 osdc = &rbd_dev->rbd_client->client->osdc; 2252 osdc = &rbd_dev->rbd_client->client->osdc;
2249 rc = ceph_pg_poolid_by_name(osdc->osdmap, rbd_dev->pool_name); 2253 rc = ceph_pg_poolid_by_name(osdc->osdmap, rbd_dev->pool_name);
@@ -2275,9 +2279,9 @@ static ssize_t rbd_add(struct bus_type *bus,
2275 return count; 2279 return count;
2276 2280
2277err_out_bus: 2281err_out_bus:
2278 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); 2282 spin_lock(&rbd_dev_list_lock);
2279 list_del_init(&rbd_dev->node); 2283 list_del_init(&rbd_dev->node);
2280 mutex_unlock(&ctl_mutex); 2284 spin_unlock(&rbd_dev_list_lock);
2281 rbd_id_put(target_id); 2285 rbd_id_put(target_id);
2282 2286
2283 /* this will also clean up rest of rbd_dev stuff */ 2287 /* this will also clean up rest of rbd_dev stuff */
@@ -2291,10 +2295,10 @@ err_out_blkdev:
2291 unregister_blkdev(rbd_dev->major, rbd_dev->name); 2295 unregister_blkdev(rbd_dev->major, rbd_dev->name);
2292err_out_client: 2296err_out_client:
2293 rbd_put_client(rbd_dev); 2297 rbd_put_client(rbd_dev);
2294 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2295err_out_slot: 2298err_out_slot:
2299 spin_lock(&rbd_dev_list_lock);
2296 list_del_init(&rbd_dev->node); 2300 list_del_init(&rbd_dev->node);
2297 mutex_unlock(&ctl_mutex); 2301 spin_unlock(&rbd_dev_list_lock);
2298 rbd_id_put(target_id); 2302 rbd_id_put(target_id);
2299 2303
2300 kfree(rbd_dev); 2304 kfree(rbd_dev);
@@ -2313,11 +2317,15 @@ static struct rbd_device *__rbd_get_dev(unsigned long id)
2313 struct list_head *tmp; 2317 struct list_head *tmp;
2314 struct rbd_device *rbd_dev; 2318 struct rbd_device *rbd_dev;
2315 2319
2320 spin_lock(&rbd_dev_list_lock);
2316 list_for_each(tmp, &rbd_dev_list) { 2321 list_for_each(tmp, &rbd_dev_list) {
2317 rbd_dev = list_entry(tmp, struct rbd_device, node); 2322 rbd_dev = list_entry(tmp, struct rbd_device, node);
2318 if (rbd_dev->id == id) 2323 if (rbd_dev->id == id) {
2324 spin_unlock(&rbd_dev_list_lock);
2319 return rbd_dev; 2325 return rbd_dev;
2326 }
2320 } 2327 }
2328 spin_unlock(&rbd_dev_list_lock);
2321 return NULL; 2329 return NULL;
2322} 2330}
2323 2331
@@ -2372,7 +2380,10 @@ static ssize_t rbd_remove(struct bus_type *bus,
2372 goto done; 2380 goto done;
2373 } 2381 }
2374 2382
2383 spin_lock(&rbd_dev_list_lock);
2375 list_del_init(&rbd_dev->node); 2384 list_del_init(&rbd_dev->node);
2385 spin_unlock(&rbd_dev_list_lock);
2386
2376 rbd_id_put(target_id); 2387 rbd_id_put(target_id);
2377 2388
2378 __rbd_remove_all_snaps(rbd_dev); 2389 __rbd_remove_all_snaps(rbd_dev);