aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/block
diff options
context:
space:
mode:
authorIlya Dryomov <ilya.dryomov@inktank.com>2013-12-13 08:28:57 -0500
committerIlya Dryomov <ilya.dryomov@inktank.com>2013-12-31 13:31:56 -0500
commitf8a22fc238a449ff982bfb40e30c3f3c9c90a08a (patch)
tree040d38a199ab4ad0e9814a4826e58f0f2725c374 /drivers/block
parente1b4d96dea61c3078775090e8b121f571aab8fda (diff)
rbd: switch to ida for rbd id assignments
Currently rbd ids are allocated using an atomic variable that keeps track of the highest id currently in use and each new id is simply one more than the value of that variable. That's nice and cheap, but it does mean that rbd ids are allowed to grow boundlessly, and, more importantly, it's completely unpredictable. So, in preparation for single-major device number allocation scheme, which is going to establish and rely on a constant mapping between rbd ids and device numbers, switch to ida for rbd id assignments. Signed-off-by: Ilya Dryomov <ilya.dryomov@inktank.com> Reviewed-by: Alex Elder <elder@linaro.org> Reviewed-by: Josh Durgin <josh.durgin@inktank.com>
Diffstat (limited to 'drivers/block')
-rw-r--r--drivers/block/rbd.c69
1 file changed, 23 insertions, 46 deletions
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 8b78a08483a6..d250549d27a4 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -41,6 +41,7 @@
 #include <linux/fs.h>
 #include <linux/blkdev.h>
 #include <linux/slab.h>
+#include <linux/idr.h>
 
 #include "rbd_types.h"
 
@@ -385,6 +386,8 @@ static struct kmem_cache *rbd_img_request_cache;
 static struct kmem_cache *rbd_obj_request_cache;
 static struct kmem_cache *rbd_segment_name_cache;
 
+static DEFINE_IDA(rbd_dev_id_ida);
+
 static int rbd_img_request_submit(struct rbd_img_request *img_request);
 
 static void rbd_dev_device_release(struct device *dev);
@@ -4371,20 +4374,27 @@ static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
 	device_unregister(&rbd_dev->dev);
 }
 
-static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
-
 /*
  * Get a unique rbd identifier for the given new rbd_dev, and add
- * the rbd_dev to the global list.  The minimum rbd id is 1.
+ * the rbd_dev to the global list.
  */
-static void rbd_dev_id_get(struct rbd_device *rbd_dev)
+static int rbd_dev_id_get(struct rbd_device *rbd_dev)
 {
-	rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
+	int new_dev_id;
+
+	new_dev_id = ida_simple_get(&rbd_dev_id_ida, 0, 0, GFP_KERNEL);
+	if (new_dev_id < 0)
+		return new_dev_id;
+
+	rbd_dev->dev_id = new_dev_id;
 
 	spin_lock(&rbd_dev_list_lock);
 	list_add_tail(&rbd_dev->node, &rbd_dev_list);
 	spin_unlock(&rbd_dev_list_lock);
+
 	dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);
+
+	return 0;
 }
 
 /*
@@ -4393,48 +4403,13 @@ static void rbd_dev_id_get(struct rbd_device *rbd_dev)
  */
 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
 {
-	struct list_head *tmp;
-	int rbd_id = rbd_dev->dev_id;
-	int max_id;
-
-	rbd_assert(rbd_id > 0);
-
-	dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
 	spin_lock(&rbd_dev_list_lock);
 	list_del_init(&rbd_dev->node);
-
-	/*
-	 * If the id being "put" is not the current maximum, there
-	 * is nothing special we need to do.
-	 */
-	if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
-		spin_unlock(&rbd_dev_list_lock);
-		return;
-	}
-
-	/*
-	 * We need to update the current maximum id.  Search the
-	 * list to find out what it is.  We're more likely to find
-	 * the maximum at the end, so search the list backward.
-	 */
-	max_id = 0;
-	list_for_each_prev(tmp, &rbd_dev_list) {
-		struct rbd_device *rbd_dev;
-
-		rbd_dev = list_entry(tmp, struct rbd_device, node);
-		if (rbd_dev->dev_id > max_id)
-			max_id = rbd_dev->dev_id;
-	}
 	spin_unlock(&rbd_dev_list_lock);
 
-	/*
-	 * The max id could have been updated by rbd_dev_id_get(), in
-	 * which case it now accurately reflects the new maximum.
-	 * Be careful not to overwrite the maximum value in that
-	 * case.
-	 */
-	atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
-	dout(" max dev id has been reset\n");
+	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
+
+	dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
 }
 
 /*
@@ -4857,10 +4832,12 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
 {
 	int ret;
 
-	/* generate unique id: find highest unique id, add one */
-	rbd_dev_id_get(rbd_dev);
+	/* Get an id and fill in device name. */
+
+	ret = rbd_dev_id_get(rbd_dev);
+	if (ret)
+		return ret;
 
-	/* Fill in the device name, now that we have its id. */
 	BUILD_BUG_ON(DEV_NAME_LEN
 		< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
 	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);