| field | value | date |
|---|---|---|
| author | NeilBrown <neilb@suse.de> | 2010-11-16 00:55:19 -0500 |
| committer | J. Bruce Fields <bfields@redhat.com> | 2010-12-07 20:39:55 -0500 |
| commit | ed2849d3ecfa339435818eeff28f6c3424300cec | |
| tree | 2fbef743779156c2c96afecd8311ff8488a90121 | |
| parent | cf7d7e5a1980d1116ee152d25dac382b112b9c17 | |

sunrpc: prevent use-after-free on clearing XPT_BUSY

When an xprt is created, it has a refcount of 1, and XPT_BUSY is set.
The refcount is *not* owned by the thread that created the xprt
(as is clear from the fact that creators never put the reference).
Rather, it is owned by the absence of XPT_DEAD. Once XPT_DEAD is set
(and XPT_BUSY is clear), that initial reference is dropped and the xprt
can be freed.

So when a creator clears XPT_BUSY it is dropping its only reference and
so must not touch the xprt again.
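
To make that ownership rule concrete, here is a small userspace C model of
the lifecycle described above. It is only a sketch: struct model_xprt,
xprt_create(), xprt_get(), xprt_put() and xprt_close() are made-up stand-ins
for the sunrpc code, flags are modelled as a plain bitmask, and all locking
and wakeup details are omitted.

```c
/* Illustrative userspace model of the reference rule described above.
 * Names mirror the kernel ones, but this is NOT the sunrpc code.      */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define XPT_BUSY  (1u << 0)
#define XPT_DEAD  (1u << 1)

struct model_xprt {
	atomic_uint refcount;
	atomic_uint flags;
};

static struct model_xprt *xprt_create(void)
{
	struct model_xprt *x = malloc(sizeof(*x));

	if (!x)
		abort();
	/* Refcount 1 + BUSY, as the text above describes.  The single
	 * reference is owned by "XPT_DEAD is not set", not by the
	 * creating thread.                                             */
	atomic_init(&x->refcount, 1);
	atomic_init(&x->flags, XPT_BUSY);
	return x;
}

static void xprt_get(struct model_xprt *x)
{
	atomic_fetch_add(&x->refcount, 1);
}

static void xprt_put(struct model_xprt *x)
{
	if (atomic_fetch_sub(&x->refcount, 1) == 1) {
		printf("last reference gone, freeing xprt\n");
		free(x);
	}
}

/* Once BUSY is clear, any other thread may mark the xprt DEAD and drop
 * the initial reference, which can free the object.                   */
static void xprt_close(struct model_xprt *x)
{
	atomic_fetch_or(&x->flags, XPT_DEAD);
	xprt_put(x);
}

int main(void)
{
	struct model_xprt *x = xprt_create();

	/* The creator clears BUSY and thereby gives up its only claim. */
	atomic_fetch_and(&x->flags, ~XPT_BUSY);

	/* From here on the creator must not touch x: a concurrent
	 * closer (modelled by the direct call below) may free it.      */
	xprt_close(x);
	return 0;
}
```

The point of the model is the tail of main(): once BUSY has been cleared
without first taking a private reference, any further access to x would race
with the free inside xprt_close().
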
However svc_recv, after calling ->xpo_accept (and so getting an XPT_BUSY
reference on a new xprt), calls svc_xprt_received. This clears
XPT_BUSY and then calls svc_xprt_enqueue - the latter without owning a
reference. This is dangerous and has been seen to leave svc_xprt_enqueue
working with an xprt containing garbage.

So we need to hold an extra counted reference over that call to
svc_xprt_enqueue.

For safety, any time we clear XPT_BUSY and then use the xprt again, we
first get a reference, and then put it again afterwards.
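
In terms of the same illustrative model (this sketch reuses the model_xprt
helpers from the block above; it is not the kernel implementation), the
broken and the fixed orderings differ only in whether a counted reference is
held across the window that opens once XPT_BUSY is cleared:

```c
/* Builds on the model_xprt helpers sketched earlier; illustrative only.
 *
 * Broken ordering: after clearing BUSY the caller owns no reference, so
 * a concurrent close may free the xprt before the follow-up use:
 *
 *	atomic_fetch_and(&x->flags, ~XPT_BUSY);
 *	use_or_enqueue(x);		// possible use-after-free
 *
 * Fixed ordering, which is what the patch below does with
 * svc_xprt_get() / clear_bit(XPT_BUSY) / svc_xprt_enqueue() /
 * svc_xprt_put():                                                      */
static void received_fixed(struct model_xprt *x)
{
	xprt_get(x);			/* pin x across the window */
	atomic_fetch_and(&x->flags, ~XPT_BUSY);
	/* ... enqueue/use x here: safe, we still hold a reference ... */
	xprt_put(x);			/* the free can happen here at the
					 * earliest, never before        */
}
```

The hunk in svc_xprt_received() below is exactly this bracketing, using
svc_xprt_get() and svc_xprt_put().
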
Note that svc_close_all does not need this extra protection as there are
no threads running, and the final free can only be called asynchronously
from such a thread.
Signed-off-by: NeilBrown <neilb@suse.de>
Cc: stable@kernel.org
Signed-off-by: J. Bruce Fields <bfields@redhat.com>

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | net/sunrpc/svc_xprt.c | 9 |

1 file changed, 8 insertions(+), 1 deletion(-)

```diff
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index ea2ff78dcf7..3f2c5559ca1 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -212,6 +212,7 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
 	spin_lock(&svc_xprt_class_lock);
 	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
 		struct svc_xprt *newxprt;
+		unsigned short newport;
 
 		if (strcmp(xprt_name, xcl->xcl_name))
 			continue;
@@ -230,8 +231,9 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
 		spin_lock_bh(&serv->sv_lock);
 		list_add(&newxprt->xpt_list, &serv->sv_permsocks);
 		spin_unlock_bh(&serv->sv_lock);
+		newport = svc_xprt_local_port(newxprt);
 		clear_bit(XPT_BUSY, &newxprt->xpt_flags);
-		return svc_xprt_local_port(newxprt);
+		return newport;
 	}
  err:
 	spin_unlock(&svc_xprt_class_lock);
@@ -425,8 +427,13 @@ void svc_xprt_received(struct svc_xprt *xprt)
 {
 	BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags));
 	xprt->xpt_pool = NULL;
+	/* As soon as we clear busy, the xprt could be closed and
+	 * 'put', so we need a reference to call svc_xprt_enqueue with:
+	 */
+	svc_xprt_get(xprt);
 	clear_bit(XPT_BUSY, &xprt->xpt_flags);
 	svc_xprt_enqueue(xprt);
+	svc_xprt_put(xprt);
 }
 EXPORT_SYMBOL_GPL(svc_xprt_received);
 
```
