author    Alex Elder <elder@inktank.com>  2013-05-01 13:43:04 -0400
committer Alex Elder <elder@inktank.com>  2013-05-02 12:58:30 -0400
commit    78c2a44aae2950ecf0279590572b861288714946 (patch)
tree      0b08672df26d2ecc61726d4577657633871a2269 /drivers/block/rbd.c
parent    868311b1ebc9b203bae0d6d1f012ea5cbdadca03 (diff)
rbd: allocate image object names with a slab allocator
The names of objects used for image object requests are always fixed size.
So create a slab cache to manage them.  Define a new function
rbd_segment_name_free() to match rbd_segment_name() (which is what
supplies the dynamically-allocated name buffer).

This is part of:
    http://tracker.ceph.com/issues/3926

Signed-off-by: Alex Elder <elder@inktank.com>
Reviewed-by: Josh Durgin <josh.durgin@inktank.com>
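For readers following along, the lifecycle this patch adopts is the usual kmem_cache one: create the cache once at module init, allocate and free fixed-size buffers from it at runtime, and destroy the cache at module exit. Below is a minimal sketch of that shape, not the patched rbd.c itself; the names name_cache and NAME_SIZE are hypothetical stand-ins:

	#include <linux/module.h>
	#include <linux/slab.h>

	#define NAME_SIZE	64	/* hypothetical fixed buffer size */

	static struct kmem_cache *name_cache;

	static int __init name_cache_init(void)
	{
		/*
		 * Every buffer is the same size, so a dedicated slab cache
		 * beats kmalloc(); an alignment of 1 suffices for a plain
		 * char buffer, which is why the patch passes 1 as well.
		 */
		name_cache = kmem_cache_create("name_cache", NAME_SIZE, 1, 0, NULL);
		return name_cache ? 0 : -ENOMEM;
	}

	static char *name_alloc(void)
	{
		/* GFP_NOIO because rbd allocates these on the block I/O path */
		return kmem_cache_alloc(name_cache, GFP_NOIO);
	}

	static void name_free(char *name)
	{
		kmem_cache_free(name_cache, name);
	}

	static void __exit name_cache_exit(void)
	{
		kmem_cache_destroy(name_cache);
		name_cache = NULL;
	}

	module_init(name_cache_init);
	module_exit(name_cache_exit);
	MODULE_LICENSE("GPL");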
Diffstat (limited to 'drivers/block/rbd.c')
-rw-r--r--	drivers/block/rbd.c	32
1 file changed, 29 insertions(+), 3 deletions(-)
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index a72842aa3b53..390946a078be 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -345,8 +345,11 @@ static DEFINE_SPINLOCK(rbd_dev_list_lock);
 static LIST_HEAD(rbd_client_list);		/* clients */
 static DEFINE_SPINLOCK(rbd_client_list_lock);
 
+/* Slab caches for frequently-allocated structures */
+
 static struct kmem_cache	*rbd_img_request_cache;
 static struct kmem_cache	*rbd_obj_request_cache;
+static struct kmem_cache	*rbd_segment_name_cache;
 
 static int rbd_img_request_submit(struct rbd_img_request *img_request);
 
@@ -985,7 +988,7 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
 	u64 segment;
 	int ret;
 
-	name = kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO);
+	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
 	if (!name)
 		return NULL;
 	segment = offset >> rbd_dev->header.obj_order;
@@ -1001,6 +1004,13 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
 	return name;
 }
 
+static void rbd_segment_name_free(const char *name)
+{
+	/* The explicit cast here is needed to drop the const qualifier */
+
+	kmem_cache_free(rbd_segment_name_cache, (void *)name);
+}
+
 static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
 {
 	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
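The explicit cast in rbd_segment_name_free() exists because rbd_segment_name() hands callers a const char * while kmem_cache_free() takes a plain void *: passing the const pointer straight through would make the compiler warn about discarding the qualifier. A minimal illustration of the pairing, with hypothetical names:

	#include <linux/slab.h>

	static struct kmem_cache *name_cache;	/* hypothetical; created at init */

	/* Callers get a read-only view of the buffer... */
	static const char *name_alloc(void)
	{
		return kmem_cache_alloc(name_cache, GFP_NOIO);
	}

	/* ...so the matching free must cast the const qualifier away. */
	static void name_free(const char *name)
	{
		kmem_cache_free(name_cache, (void *)name);
	}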
@@ -2033,7 +2043,8 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
 		length = rbd_segment_length(rbd_dev, img_offset, resid);
 		obj_request = rbd_obj_request_create(object_name,
 						offset, length, type);
-		kfree(object_name);	/* object request has its own copy */
+		/* object request has its own copy of the object name */
+		rbd_segment_name_free(object_name);
 		if (!obj_request)
 			goto out_unwind;
 
@@ -5018,8 +5029,19 @@ static int rbd_slab_init(void)
 					sizeof (struct rbd_obj_request),
 					__alignof__(struct rbd_obj_request),
 					0, NULL);
-	if (rbd_obj_request_cache)
+	if (!rbd_obj_request_cache)
+		goto out_err;
+
+	rbd_assert(!rbd_segment_name_cache);
+	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
+					MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
+	if (rbd_segment_name_cache)
 		return 0;
+out_err:
+	if (rbd_obj_request_cache) {
+		kmem_cache_destroy(rbd_obj_request_cache);
+		rbd_obj_request_cache = NULL;
+	}
 
 	kmem_cache_destroy(rbd_img_request_cache);
 	rbd_img_request_cache = NULL;
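The restructured error path above is the standard kernel goto-unwind idiom: each setup step that fails jumps to cleanup code that tears down only what was already built, in reverse order, so every partial-failure state exits through one path. A sketch of the shape with three hypothetical caches:

	#include <linux/slab.h>

	static struct kmem_cache *cache_a;
	static struct kmem_cache *cache_b;
	static struct kmem_cache *cache_c;

	static int caches_init(void)
	{
		cache_a = kmem_cache_create("cache_a", 64, 0, 0, NULL);
		if (!cache_a)
			return -ENOMEM;

		cache_b = kmem_cache_create("cache_b", 128, 0, 0, NULL);
		if (!cache_b)
			goto out_free_a;

		cache_c = kmem_cache_create("cache_c", 32, 1, 0, NULL);
		if (!cache_c)
			goto out_free_b;

		return 0;

	out_free_b:	/* undo in reverse order of setup */
		kmem_cache_destroy(cache_b);
		cache_b = NULL;
	out_free_a:
		kmem_cache_destroy(cache_a);
		cache_a = NULL;
		return -ENOMEM;
	}

The patch itself uses a single out_err label with a NULL check on rbd_obj_request_cache rather than distinct labels; both arrangements perform the same reverse-order unwinding.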
@@ -5029,6 +5051,10 @@ static int rbd_slab_init(void)
 
 static void rbd_slab_exit(void)
 {
+	rbd_assert(rbd_segment_name_cache);
+	kmem_cache_destroy(rbd_segment_name_cache);
+	rbd_segment_name_cache = NULL;
+
 	rbd_assert(rbd_obj_request_cache);
 	kmem_cache_destroy(rbd_obj_request_cache);
 	rbd_obj_request_cache = NULL;