author      Alex Elder <elder@dreamhost.com>          2012-01-29 14:57:43 -0500
committer   Sage Weil <sage@newdream.net>             2012-02-02 15:56:59 -0500
commit      d23a4b3fd6ef70b80411b39b8c8bc548a219ce70
tree        f42dbc4c8544acce987e35df5b3002f1def54cff   /drivers
parent      97bb59a03dd6767fcc00be09b0c6d9e5294eeea6
rbd: fix safety of rbd_put_client()
The rbd_client structure uses a kref to arrange for cleaning up and
freeing an instance when its last reference is dropped. The cleanup
routine is rbd_client_release(), and one of the things it does is
delete the rbd_client from rbd_client_list. It acquires node_lock
to do so, but the way it is done is still not safe.
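For reference, the pieces involved look roughly like the sketch below. It is reconstructed from the identifiers the patch touches (rbd_client, its kref and node fields, rbd_client_list, node_lock); the exact declarations in drivers/block/rbd.c may differ in detail.

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct ceph_client;			/* from libceph */
struct rbd_options;

/* Global list of active clients; node_lock protects the list. */
static LIST_HEAD(rbd_client_list);
static DEFINE_SPINLOCK(node_lock);

struct rbd_client {
	struct ceph_client *client;	/* underlying libceph client */
	struct rbd_options *rbd_opts;
	struct kref kref;		/* last put runs rbd_client_release() */
	struct list_head node;		/* entry on rbd_client_list */
};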
The problem is that when attempting to reuse an existing rbd_client,
the structure found might already be in the process of getting
destroyed and cleaned up.
Here's the scenario, with "CLIENT" representing an existing
rbd_client that's involved in the race (a simplified sketch of the
two code paths follows the diagram):
Thread on CPU A                 | Thread on CPU B
--------------------------------+--------------------------------
rbd_put_client(CLIENT)          | rbd_get_client()
  kref_put()                    |   (acquires node_lock)
    kref->refcount becomes 0    |   __rbd_client_find() returns CLIENT
    calls rbd_client_release()  |   kref_get(&CLIENT->kref);
                                |   (releases node_lock)
      (acquires node_lock)      |
      deletes CLIENT from list  |   ...and starts using CLIENT...
      (releases node_lock)      |
      and frees CLIENT          | <-- but CLIENT gets freed here
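In code terms, the window exists because the lookup path takes node_lock but the put path does not, so the refcount can reach zero between __rbd_client_find() and kref_get(). The sketch below condenses the pre-patch logic; the real helpers take rbd_options/rbd_device arguments, which are omitted here.

static struct rbd_client *__rbd_client_find(void);	/* walks rbd_client_list */
static void rbd_client_release(struct kref *kref);	/* unlinks and frees */

/* CPU B: find and reuse an existing client (pre-patch shape). */
static struct rbd_client *rbd_get_client_sketch(void)
{
	struct rbd_client *rbdc;

	spin_lock(&node_lock);
	rbdc = __rbd_client_find();	/* CLIENT may still be on the list... */
	if (rbdc)
		kref_get(&rbdc->kref);	/* ...even though its refcount already
					 * reached zero on another CPU, so this
					 * "get" revives an object that is
					 * about to be freed. */
	spin_unlock(&node_lock);

	return rbdc;
}

/* CPU A: drop a reference (pre-patch shape). */
static void rbd_put_client_sketch(struct rbd_client *rbdc)
{
	/* node_lock is NOT held here, so the refcount can hit zero and
	 * rbd_client_release() can start running while CPU B sits inside
	 * the locked region above. */
	kref_put(&rbdc->kref, rbd_client_release);
}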
Fix this by having rbd_put_client() acquire node_lock. The result
could still be improved, but at least it avoids this problem.
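With node_lock held across the final kref_put(), the refcount cannot drop to zero while another CPU is between __rbd_client_find() and kref_get(), and rbd_client_release() (which now expects the caller to hold the lock) unlinks the client before the lock is released. In the condensed form used above, the post-patch put path looks like this:

/* CPU A: drop a reference (post-patch shape). */
static void rbd_put_client_sketch_fixed(struct rbd_client *rbdc)
{
	spin_lock(&node_lock);
	/* If this is the last reference, rbd_client_release() runs here,
	 * under node_lock, and removes the client from rbd_client_list
	 * before any concurrent lookup can find it again. */
	kref_put(&rbdc->kref, rbd_client_release);
	spin_unlock(&node_lock);
}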
Signed-off-by: Alex Elder <elder@dreamhost.com>
Signed-off-by: Sage Weil <sage@newdream.net>
Diffstat (limited to 'drivers')
 drivers/block/rbd.c | 6 ++++--
1 files changed, 4 insertions, 2 deletions
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 7d8f8ddb3359..7f40cb4553c9 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -407,15 +407,15 @@ done_err:
 
 /*
  * Destroy ceph client
+ *
+ * Caller must hold node_lock.
  */
 static void rbd_client_release(struct kref *kref)
 {
 	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
 
 	dout("rbd_release_client %p\n", rbdc);
-	spin_lock(&node_lock);
 	list_del(&rbdc->node);
-	spin_unlock(&node_lock);
 
 	ceph_destroy_client(rbdc->client);
 	kfree(rbdc->rbd_opts);
@@ -428,7 +428,9 @@ static void rbd_client_release(struct kref *kref)
  */
 static void rbd_put_client(struct rbd_device *rbd_dev)
 {
+	spin_lock(&node_lock);
 	kref_put(&rbd_dev->rbd_client->kref, rbd_client_release);
+	spin_unlock(&node_lock);
 	rbd_dev->rbd_client = NULL;
 	rbd_dev->client = NULL;
 }