author	Alex Elder <elder@inktank.com>	2013-05-29 12:19:00 -0400
committer	Sage Weil <sage@inktank.com>	2013-07-03 18:32:39 -0400
commit	08f75463c15e26e9d67a7c992ce7dd8964c6cbdd (patch)
tree	b1ec2df4acc47286381aa0085eeb5c76f03e628d
parent	3b5cf2a2f1746a253d56f54ffbb45170c90b1cbd (diff)
rbd: protect against duplicate client creation
If more than one rbd image has the same ceph cluster configuration
(same options, same set of monitors, same keys) they normally share
a single rbd client.

When an image is getting mapped, rbd looks to see if an existing
client can be used, and creates a new one if not.  The lookup and
creation are not done under a common lock though, so mapping two
images concurrently could lead to duplicate clients getting set up
needlessly.

This isn't a major problem, but it's wasteful and different from
what's intended.  This patch fixes that by using the control mutex
to protect both the lookup and (if needed) creation of the client.
It was previously used just when creating.

This resolves:
    http://tracker.ceph.com/issues/3094

Signed-off-by: Alex Elder <elder@inktank.com>
Reviewed-by: Josh Durgin <josh.durgin@inktank.com>
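For illustration, a minimal user-space sketch of the find-or-create
pattern the patch moves under a single lock.  The names here
(client_get, client_find, client_create, cache_lock) are hypothetical
stand-ins for the kernel's rbd_get_client(), rbd_client_find(),
rbd_client_create() and ctl_mutex, and a pthread mutex stands in for
the kernel mutex; this is a sketch of the idea, not the driver code.

/*
 * Sketch: lookup and creation of a shared client both happen while
 * cache_lock is held, so two concurrent callers with the same config
 * cannot both miss the lookup and create duplicate clients.
 */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct client {
	struct client *next;
	char config[64];	/* stands in for the ceph options */
	int refcount;
};

static struct client *client_list;
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

/* Look for an existing client with the same configuration. */
static struct client *client_find(const char *config)
{
	struct client *c;

	for (c = client_list; c; c = c->next)
		if (!strcmp(c->config, config)) {
			c->refcount++;
			return c;
		}
	return NULL;
}

/* Create a new client and add it to the shared list. */
static struct client *client_create(const char *config)
{
	struct client *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	strncpy(c->config, config, sizeof(c->config) - 1);
	c->refcount = 1;
	c->next = client_list;
	client_list = c;
	return c;
}

/* Both the lookup and (if needed) the creation are done under the lock. */
struct client *client_get(const char *config)
{
	struct client *c;

	pthread_mutex_lock(&cache_lock);
	c = client_find(config);
	if (!c)
		c = client_create(config);
	pthread_mutex_unlock(&cache_lock);
	return c;
}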
-rw-r--r--	drivers/block/rbd.c	17
1 file changed, 7 insertions, 10 deletions
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index d4902c52bf26..fd2795d1136a 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -520,7 +520,7 @@ static const struct block_device_operations rbd_bd_ops = {
 
 /*
  * Initialize an rbd client instance.  Success or not, this function
- * consumes ceph_opts.
+ * consumes ceph_opts.  Caller holds ctl_mutex.
  */
 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
 {
@@ -535,30 +535,25 @@ static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
 	kref_init(&rbdc->kref);
 	INIT_LIST_HEAD(&rbdc->node);
 
-	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
 	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
 	if (IS_ERR(rbdc->client))
-		goto out_mutex;
+		goto out_rbdc;
 	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
 
 	ret = ceph_open_session(rbdc->client);
 	if (ret < 0)
-		goto out_err;
+		goto out_client;
 
 	spin_lock(&rbd_client_list_lock);
 	list_add_tail(&rbdc->node, &rbd_client_list);
 	spin_unlock(&rbd_client_list_lock);
 
-	mutex_unlock(&ctl_mutex);
 	dout("%s: rbdc %p\n", __func__, rbdc);
 
 	return rbdc;
-
-out_err:
+out_client:
 	ceph_destroy_client(rbdc->client);
-out_mutex:
-	mutex_unlock(&ctl_mutex);
+out_rbdc:
 	kfree(rbdc);
 out_opt:
 	if (ceph_opts)
@@ -682,11 +677,13 @@ static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
 {
 	struct rbd_client *rbdc;
 
+	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
 	rbdc = rbd_client_find(ceph_opts);
 	if (rbdc)	/* using an existing client */
 		ceph_destroy_options(ceph_opts);
 	else
 		rbdc = rbd_client_create(ceph_opts);
+	mutex_unlock(&ctl_mutex);
 
 	return rbdc;
 }