Diffstat (limited to 'net/sunrpc/svcsock.c')
-rw-r--r--	net/sunrpc/svcsock.c	94
1 file changed, 48 insertions, 46 deletions
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index c47bede754ea..d95a0c894d4f 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -5,7 +5,7 @@
  *
  * The server scheduling algorithm does not always distribute the load
  * evenly when servicing a single client. May need to modify the
- * svc_sock_enqueue procedure...
+ * svc_xprt_enqueue procedure...
  *
  * TCP support is largely untested and may be a little slow. The problem
  * is that we currently do two separate recvfrom's, one for the 4-byte
@@ -63,7 +63,7 @@
  * providing that certain rules are followed:
  *
  * XPT_CONN, XPT_DATA, can be set or cleared at any time.
- *	after a set, svc_sock_enqueue must be called.
+ *	after a set, svc_xprt_enqueue must be called.
  *	after a clear, the socket must be read/accepted
  *	 if this succeeds, it must be set again.
  * XPT_CLOSE can set at any time. It is never cleared.
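
The flag rules above hinge on XPT_BUSY: it is updated with an atomic test-and-set, so only one caller at a time can own the enqueue and a transport can never be queued twice. Below is a minimal user-space sketch of that guard, assuming C11 atomics as stand-ins for the kernel's bitops and illustrative bit numbers; nothing in it is the actual kernel implementation.

	/*
	 * Minimal sketch of the enqueue guard described above. Flag
	 * names mirror the kernel's, but the bit numbers are made up
	 * and C11 atomics replace the kernel's bitops.
	 */
	#include <stdatomic.h>
	#include <stdio.h>

	enum { XPT_DATA = 1, XPT_BUSY = 2 };	/* illustrative bit numbers */

	static atomic_ulong xpt_flags;

	static void xprt_enqueue(void)
	{
		/* whoever flips XPT_BUSY from 0 to 1 owns the enqueue,
		 * so the transport is never queued twice */
		if (atomic_fetch_or(&xpt_flags, 1UL << XPT_BUSY) & (1UL << XPT_BUSY)) {
			printf("transport busy, not enqueued\n");
			return;
		}
		printf("transport enqueued\n");
	}

	int main(void)
	{
		/* "after a set, svc_xprt_enqueue must be called" */
		atomic_fetch_or(&xpt_flags, 1UL << XPT_DATA);
		xprt_enqueue();
		xprt_enqueue();		/* no-op: still marked busy */
		return 0;
	}
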
@@ -212,22 +212,21 @@ static void svc_release_skb(struct svc_rqst *rqstp)
  * processes, wake 'em up.
  *
  */
-static void
-svc_sock_enqueue(struct svc_sock *svsk)
+void svc_xprt_enqueue(struct svc_xprt *xprt)
 {
-	struct svc_serv *serv = svsk->sk_xprt.xpt_server;
+	struct svc_serv *serv = xprt->xpt_server;
 	struct svc_pool *pool;
 	struct svc_rqst *rqstp;
 	int cpu;
 
-	if (!(svsk->sk_xprt.xpt_flags &
+	if (!(xprt->xpt_flags &
 	      ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
 		return;
-	if (test_bit(XPT_DEAD, &svsk->sk_xprt.xpt_flags))
+	if (test_bit(XPT_DEAD, &xprt->xpt_flags))
 		return;
 
 	cpu = get_cpu();
-	pool = svc_pool_for_cpu(svsk->sk_xprt.xpt_server, cpu);
+	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
 	put_cpu();
 
 	spin_lock_bh(&pool->sp_lock);
@@ -235,11 +234,12 @@ svc_sock_enqueue(struct svc_sock *svsk)
 	if (!list_empty(&pool->sp_threads) &&
 	    !list_empty(&pool->sp_sockets))
 		printk(KERN_ERR
-		       "svc_sock_enqueue: threads and sockets both waiting??\n");
+		       "svc_xprt_enqueue: "
+		       "threads and transports both waiting??\n");
 
-	if (test_bit(XPT_DEAD, &svsk->sk_xprt.xpt_flags)) {
+	if (test_bit(XPT_DEAD, &xprt->xpt_flags)) {
 		/* Don't enqueue dead sockets */
-		dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
+		dprintk("svc: transport %p is dead, not enqueued\n", xprt);
 		goto out_unlock;
 	}
 
@@ -248,28 +248,29 @@ svc_sock_enqueue(struct svc_sock *svsk)
 	 * on the idle list. We update XPT_BUSY atomically because
 	 * it also guards against trying to enqueue the svc_sock twice.
 	 */
-	if (test_and_set_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)) {
+	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
 		/* Don't enqueue socket while already enqueued */
-		dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
+		dprintk("svc: transport %p busy, not enqueued\n", xprt);
 		goto out_unlock;
 	}
-	BUG_ON(svsk->sk_xprt.xpt_pool != NULL);
-	svsk->sk_xprt.xpt_pool = pool;
+	BUG_ON(xprt->xpt_pool != NULL);
+	xprt->xpt_pool = pool;
 
 	/* Handle pending connection */
-	if (test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags))
+	if (test_bit(XPT_CONN, &xprt->xpt_flags))
 		goto process;
 
 	/* Handle close in-progress */
-	if (test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags))
+	if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
 		goto process;
 
 	/* Check if we have space to reply to a request */
-	if (!svsk->sk_xprt.xpt_ops->xpo_has_wspace(&svsk->sk_xprt)) {
+	if (!xprt->xpt_ops->xpo_has_wspace(xprt)) {
 		/* Don't enqueue while not enough space for reply */
-		dprintk("svc: no write space, socket %p not enqueued\n", svsk);
-		svsk->sk_xprt.xpt_pool = NULL;
-		clear_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
+		dprintk("svc: no write space, transport %p not enqueued\n",
+			xprt);
+		xprt->xpt_pool = NULL;
+		clear_bit(XPT_BUSY, &xprt->xpt_flags);
 		goto out_unlock;
 	}
 
@@ -278,28 +279,29 @@ svc_sock_enqueue(struct svc_sock *svsk)
 		rqstp = list_entry(pool->sp_threads.next,
 				   struct svc_rqst,
 				   rq_list);
-		dprintk("svc: socket %p served by daemon %p\n",
-			svsk->sk_sk, rqstp);
+		dprintk("svc: transport %p served by daemon %p\n",
+			xprt, rqstp);
 		svc_thread_dequeue(pool, rqstp);
-		if (rqstp->rq_sock)
+		if (rqstp->rq_xprt)
 			printk(KERN_ERR
-				"svc_sock_enqueue: server %p, rq_sock=%p!\n",
-				rqstp, rqstp->rq_sock);
-		rqstp->rq_sock = svsk;
-		svc_xprt_get(&svsk->sk_xprt);
+				"svc_xprt_enqueue: server %p, rq_xprt=%p!\n",
+				rqstp, rqstp->rq_xprt);
+		rqstp->rq_xprt = xprt;
+		svc_xprt_get(xprt);
 		rqstp->rq_reserved = serv->sv_max_mesg;
-		atomic_add(rqstp->rq_reserved, &svsk->sk_xprt.xpt_reserved);
-		BUG_ON(svsk->sk_xprt.xpt_pool != pool);
+		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
+		BUG_ON(xprt->xpt_pool != pool);
 		wake_up(&rqstp->rq_wait);
 	} else {
-		dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
-		list_add_tail(&svsk->sk_xprt.xpt_ready, &pool->sp_sockets);
-		BUG_ON(svsk->sk_xprt.xpt_pool != pool);
+		dprintk("svc: transport %p put into queue\n", xprt);
+		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
+		BUG_ON(xprt->xpt_pool != pool);
 	}
 
 out_unlock:
 	spin_unlock_bh(&pool->sp_lock);
 }
+EXPORT_SYMBOL_GPL(svc_xprt_enqueue);
 
 /*
  * Dequeue the first socket. Must be called with the pool->sp_lock held.
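
Aside from operating on the generic svc_xprt instead of svc_sock, the enqueue logic above is unchanged: once past the dead, busy, and write-space checks, the transport is either handed directly to an idle thread taken off pool->sp_threads or appended to pool->sp_sockets for the next thread that frees up. A rough user-space model of that dispatch choice follows; the types are invented stand-ins, not the kernel's svc_pool or svc_xprt.

	/*
	 * Rough model of the dispatch choice in svc_xprt_enqueue.
	 * "Idle threads" are just a counter here.
	 */
	#include <stdio.h>

	#define MAX_READY 8

	struct xprt { const char *name; };

	struct pool {
		int nr_idle;				/* threads waiting for work */
		const struct xprt *ready[MAX_READY];	/* models pool->sp_sockets */
		int nr_ready;
	};

	static void enqueue(struct pool *pool, const struct xprt *xprt)
	{
		if (pool->nr_idle > 0) {
			/* hand the transport straight to an idle thread */
			pool->nr_idle--;
			printf("transport %s served by daemon\n", xprt->name);
		} else if (pool->nr_ready < MAX_READY) {
			/* no idle thread: queue it for the next free one */
			pool->ready[pool->nr_ready++] = xprt;
			printf("transport %s put into queue\n", xprt->name);
		}
	}

	int main(void)
	{
		struct pool pool = { .nr_idle = 1 };
		const struct xprt a = { "a" }, b = { "b" };

		enqueue(&pool, &a);	/* woken thread takes it directly */
		enqueue(&pool, &b);	/* queued: the only idle thread is gone */
		return 0;
	}
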
@@ -333,7 +335,7 @@ svc_sock_received(struct svc_sock *svsk)
 {
 	svsk->sk_xprt.xpt_pool = NULL;
 	clear_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
-	svc_sock_enqueue(svsk);
+	svc_xprt_enqueue(&svsk->sk_xprt);
 }
 
 
@@ -352,11 +354,11 @@ void svc_reserve(struct svc_rqst *rqstp, int space)
 	space += rqstp->rq_res.head[0].iov_len;
 
 	if (space < rqstp->rq_reserved) {
-		struct svc_sock *svsk = rqstp->rq_sock;
-		atomic_sub((rqstp->rq_reserved - space), &svsk->sk_xprt.xpt_reserved);
+		struct svc_xprt *xprt = rqstp->rq_xprt;
+		atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
 		rqstp->rq_reserved = space;
 
-		svc_sock_enqueue(svsk);
+		svc_xprt_enqueue(xprt);
 	}
 }
 
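svc_reserve keeps two counters in step: the per-request rq_reserved and the transport-wide xpt_reserved, which sums the reservations of every thread using the transport. Shrinking a reservation frees write space, which is why the function ends with svc_xprt_enqueue: a transport that xpo_has_wspace previously rejected may now be eligible. A small worked example, with plain ints standing in for the kernel's atomic_t:

	/*
	 * Worked example of the reservation accounting above; plain
	 * ints stand in for atomic_t and the numbers are invented.
	 */
	#include <stdio.h>

	int main(void)
	{
		int xpt_reserved = 4096;	/* transport-wide reserved space */
		int rq_reserved = 4096;		/* this request's reservation */
		int space = 512;		/* space the reply actually needs */

		if (space < rq_reserved) {
			/* release the surplus back to the transport */
			xpt_reserved -= rq_reserved - space;
			rq_reserved = space;
			/* the kernel calls svc_xprt_enqueue() here: the freed
			 * write space may unblock the xpo_has_wspace check */
		}
		printf("xpt_reserved=%d rq_reserved=%d\n",
		       xpt_reserved, rq_reserved);
		return 0;
	}
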
@@ -684,7 +686,7 @@ svc_udp_data_ready(struct sock *sk, int count)
 			svsk, sk, count,
 			test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
 		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
-		svc_sock_enqueue(svsk);
+		svc_xprt_enqueue(&svsk->sk_xprt);
 	}
 	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
 		wake_up_interruptible(sk->sk_sleep);
@@ -701,7 +703,7 @@ svc_write_space(struct sock *sk)
 	if (svsk) {
 		dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
 			svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
-		svc_sock_enqueue(svsk);
+		svc_xprt_enqueue(&svsk->sk_xprt);
 	}
 
 	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
@@ -973,7 +975,7 @@ svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
 	if (sk->sk_state == TCP_LISTEN) {
 		if (svsk) {
 			set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
-			svc_sock_enqueue(svsk);
+			svc_xprt_enqueue(&svsk->sk_xprt);
 		} else
 			printk("svc: socket %p: no user data\n", sk);
 	}
@@ -997,7 +999,7 @@ svc_tcp_state_change(struct sock *sk)
 		printk("svc: socket %p: no user data\n", sk);
 	else {
 		set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
-		svc_sock_enqueue(svsk);
+		svc_xprt_enqueue(&svsk->sk_xprt);
 	}
 	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
 		wake_up_interruptible_all(sk->sk_sleep);
@@ -1012,7 +1014,7 @@ svc_tcp_data_ready(struct sock *sk, int count)
 		sk, sk->sk_user_data);
 	if (svsk) {
 		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
-		svc_sock_enqueue(svsk);
+		svc_xprt_enqueue(&svsk->sk_xprt);
 	}
 	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
 		wake_up_interruptible(sk->sk_sleep);
@@ -1298,7 +1300,7 @@ svc_tcp_sendto(struct svc_rqst *rqstp)
 		       (sent<0)?"got error":"sent only",
 		       sent, xbufp->len);
 		set_bit(XPT_CLOSE, &rqstp->rq_sock->sk_xprt.xpt_flags);
-		svc_sock_enqueue(rqstp->rq_sock);
+		svc_xprt_enqueue(rqstp->rq_xprt);
 		sent = -EAGAIN;
 	}
 	return sent;
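
The hunk above shows the usual pattern for a fatal transport error: mark XPT_CLOSE, enqueue the transport so a server thread will tear it down, and hand -EAGAIN back to the caller. A self-contained sketch of that shape, with the short send simulated rather than taken from the kernel:

	/*
	 * Sketch of the svc_tcp_sendto error path; the partial send
	 * is simulated and nothing here is kernel code.
	 */
	#include <errno.h>
	#include <stdio.h>

	static int pretend_sendmsg(int len)
	{
		return len / 2;		/* simulate a short write */
	}

	int main(void)
	{
		int len = 100;
		int sent = pretend_sendmsg(len);

		if (sent != len) {
			/* kernel: set_bit(XPT_CLOSE, ...) then
			 * svc_xprt_enqueue(), so a server thread sees
			 * XPT_CLOSE and closes the transport */
			printf("%s %d bytes of %d, marking transport for close\n",
			       sent < 0 ? "got error" : "sent only", sent, len);
			sent = -EAGAIN;
		}
		return sent == len ? 0 : 1;
	}
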
@@ -1476,7 +1478,7 @@ static void svc_check_conn_limits(struct svc_serv *serv)
 	spin_unlock_bh(&serv->sv_lock);
 
 	if (svsk) {
-		svc_sock_enqueue(svsk);
+		svc_xprt_enqueue(&svsk->sk_xprt);
 		svc_xprt_put(&svsk->sk_xprt);
 	}
 }
@@ -1709,7 +1711,7 @@ svc_age_temp_sockets(unsigned long closure)
 			svsk, get_seconds() - svsk->sk_lastrecv);
 
 		/* a thread will dequeue and close it soon */
-		svc_sock_enqueue(svsk);
+		svc_xprt_enqueue(&svsk->sk_xprt);
 		svc_xprt_put(&svsk->sk_xprt);
 	}
 
@@ -1991,7 +1993,7 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
 	list_add(&dr->handle.recent, &svsk->sk_deferred);
 	spin_unlock(&svsk->sk_lock);
 	set_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags);
-	svc_sock_enqueue(svsk);
+	svc_xprt_enqueue(&svsk->sk_xprt);
 	svc_xprt_put(&svsk->sk_xprt);
 }
 
