author      Chuck Lever <cel@citi.umich.edu>                 2005-08-11 16:25:32 -0400
committer   Trond Myklebust <Trond.Myklebust@netapp.com>     2005-09-23 12:38:17 -0400
commit      4a0f8c04f2ece949d54a0c4fd7490259cf23a58a
tree        6c6e142cfa37b984dcba6f785a4f886374a307c6 /net
parent      b4b5cc85ed4ecbe4adbfbc4df028850de67a9f09
[PATCH] RPC: Rename sock_lock
Clean-up: remove a socket-specific name from the generic parts of the RPC
client by renaming the sock_lock field in the rpc_xprt structure to transport_lock.
Test-plan:
Compile kernel with CONFIG_NFS enabled.
Version: Thu, 11 Aug 2005 16:05:00 -0400
Signed-off-by: Chuck Lever <cel@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'net')
-rw-r--r--  net/sunrpc/xprt.c     | 44 ++++++++++++++++++++++----------------------
-rw-r--r--  net/sunrpc/xprtsock.c | 22 +++++++++++-----------
2 files changed, 33 insertions, 33 deletions
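
Note that this diffstat is limited to 'net', so the rpc_xprt field rename itself, which lives in include/linux/sunrpc/xprt.h, does not appear on this page. A rough, hypothetical sketch of what that header hunk presumably looks like is shown here; the type must be spinlock_t given the spin_lock_bh() callers below, but the surrounding layout and the comment text are illustrative assumptions, not lines copied from the commit:

--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ ... @@ struct rpc_xprt {
-        spinlock_t              sock_lock;        /* protects transport state */
+        spinlock_t              transport_lock;   /* protects transport state */

Every caller in the hunks below then takes the same spinlock under its new name: spin_lock_bh(&xprt->transport_lock) on the process-context paths, and plain spin_lock(&xprt->transport_lock) in the timer and socket data_ready callbacks that already run in bottom-half context.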
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 589195e630ef..1f0da8c1a3b0 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -106,9 +106,9 @@ xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
 {
         int retval;
 
-        spin_lock_bh(&xprt->sock_lock);
+        spin_lock_bh(&xprt->transport_lock);
         retval = __xprt_lock_write(xprt, task);
-        spin_unlock_bh(&xprt->sock_lock);
+        spin_unlock_bh(&xprt->transport_lock);
         return retval;
 }
 
@@ -161,9 +161,9 @@ __xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
 static inline void
 xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
 {
-        spin_lock_bh(&xprt->sock_lock);
+        spin_lock_bh(&xprt->transport_lock);
         __xprt_release_write(xprt, task);
-        spin_unlock_bh(&xprt->sock_lock);
+        spin_unlock_bh(&xprt->transport_lock);
 }
 
 /*
@@ -266,9 +266,9 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
                 req->rq_retries = 0;
                 xprt_reset_majortimeo(req);
                 /* Reset the RTT counters == "slow start" */
-                spin_lock_bh(&xprt->sock_lock);
+                spin_lock_bh(&xprt->transport_lock);
                 rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
-                spin_unlock_bh(&xprt->sock_lock);
+                spin_unlock_bh(&xprt->transport_lock);
                 pprintk("RPC: %lu timeout\n", jiffies);
                 status = -ETIMEDOUT;
         }
@@ -298,10 +298,10 @@ xprt_socket_autoclose(void *args)
 void xprt_disconnect(struct rpc_xprt *xprt)
 {
         dprintk("RPC: disconnected transport %p\n", xprt);
-        spin_lock_bh(&xprt->sock_lock);
+        spin_lock_bh(&xprt->transport_lock);
         xprt_clear_connected(xprt);
         rpc_wake_up_status(&xprt->pending, -ENOTCONN);
-        spin_unlock_bh(&xprt->sock_lock);
+        spin_unlock_bh(&xprt->transport_lock);
 }
 
 static void
@@ -309,12 +309,12 @@ xprt_init_autodisconnect(unsigned long data)
 {
         struct rpc_xprt *xprt = (struct rpc_xprt *)data;
 
-        spin_lock(&xprt->sock_lock);
+        spin_lock(&xprt->transport_lock);
         if (!list_empty(&xprt->recv) || xprt->shutdown)
                 goto out_abort;
         if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate))
                 goto out_abort;
-        spin_unlock(&xprt->sock_lock);
+        spin_unlock(&xprt->transport_lock);
         /* Let keventd close the socket */
         if (test_bit(XPRT_CONNECTING, &xprt->sockstate) != 0)
                 xprt_release_write(xprt, NULL);
@@ -322,7 +322,7 @@ xprt_init_autodisconnect(unsigned long data)
                 schedule_work(&xprt->task_cleanup);
         return;
 out_abort:
-        spin_unlock(&xprt->sock_lock);
+        spin_unlock(&xprt->transport_lock);
 }
 
 /**
@@ -482,7 +482,7 @@ xprt_timer(struct rpc_task *task)
         struct rpc_rqst *req = task->tk_rqstp;
         struct rpc_xprt *xprt = req->rq_xprt;
 
-        spin_lock(&xprt->sock_lock);
+        spin_lock(&xprt->transport_lock);
         if (req->rq_received)
                 goto out;
 
@@ -496,7 +496,7 @@ xprt_timer(struct rpc_task *task)
 out:
         task->tk_timeout = 0;
         rpc_wake_up_task(task);
-        spin_unlock(&xprt->sock_lock);
+        spin_unlock(&xprt->transport_lock);
 }
 
 /**
@@ -515,7 +515,7 @@ int xprt_prepare_transmit(struct rpc_task *task)
         if (xprt->shutdown)
                 return -EIO;
 
-        spin_lock_bh(&xprt->sock_lock);
+        spin_lock_bh(&xprt->transport_lock);
         if (req->rq_received && !req->rq_bytes_sent) {
                 err = req->rq_received;
                 goto out_unlock;
@@ -530,7 +530,7 @@ int xprt_prepare_transmit(struct rpc_task *task)
                 goto out_unlock;
         }
 out_unlock:
-        spin_unlock_bh(&xprt->sock_lock);
+        spin_unlock_bh(&xprt->transport_lock);
         return err;
 }
 
@@ -552,13 +552,13 @@ void xprt_transmit(struct rpc_task *task)
         smp_rmb();
         if (!req->rq_received) {
                 if (list_empty(&req->rq_list)) {
-                        spin_lock_bh(&xprt->sock_lock);
+                        spin_lock_bh(&xprt->transport_lock);
                         /* Update the softirq receive buffer */
                         memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
                                         sizeof(req->rq_private_buf));
                         /* Add request to the receive list */
                         list_add_tail(&req->rq_list, &xprt->recv);
-                        spin_unlock_bh(&xprt->sock_lock);
+                        spin_unlock_bh(&xprt->transport_lock);
                         xprt_reset_majortimeo(req);
                         /* Turn off autodisconnect */
                         del_singleshot_timer_sync(&xprt->timer);
@@ -592,7 +592,7 @@ void xprt_transmit(struct rpc_task *task)
 out_receive:
         dprintk("RPC: %4d xmit complete\n", task->tk_pid);
         /* Set the task's receive timeout value */
-        spin_lock_bh(&xprt->sock_lock);
+        spin_lock_bh(&xprt->transport_lock);
         if (!xprt->nocong) {
                 int timer = task->tk_msg.rpc_proc->p_timer;
                 task->tk_timeout = rpc_calc_rto(clnt->cl_rtt, timer);
@@ -607,7 +607,7 @@ void xprt_transmit(struct rpc_task *task)
         else if (!req->rq_received)
                 rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
         __xprt_release_write(xprt, task);
-        spin_unlock_bh(&xprt->sock_lock);
+        spin_unlock_bh(&xprt->transport_lock);
 }
 
 static inline void do_xprt_reserve(struct rpc_task *task)
@@ -683,7 +683,7 @@ void xprt_release(struct rpc_task *task)
 
         if (!(req = task->tk_rqstp))
                 return;
-        spin_lock_bh(&xprt->sock_lock);
+        spin_lock_bh(&xprt->transport_lock);
         __xprt_release_write(xprt, task);
         __xprt_put_cong(xprt, req);
         if (!list_empty(&req->rq_list))
@@ -692,7 +692,7 @@ void xprt_release(struct rpc_task *task)
         if (list_empty(&xprt->recv) && !xprt->shutdown)
                 mod_timer(&xprt->timer,
                                 xprt->last_used + RPC_IDLE_DISCONNECT_TIMEOUT);
-        spin_unlock_bh(&xprt->sock_lock);
+        spin_unlock_bh(&xprt->transport_lock);
         task->tk_rqstp = NULL;
         memset(req, 0, sizeof(*req)); /* mark unused */
 
@@ -750,7 +750,7 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc
                 return ERR_PTR(result);
         }
 
-        spin_lock_init(&xprt->sock_lock);
+        spin_lock_init(&xprt->transport_lock);
         spin_lock_init(&xprt->xprt_lock);
         init_waitqueue_head(&xprt->cong_wait);
 
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index a5a04203a6b0..bc90caab6088 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -307,7 +307,7 @@ static int xs_send_request(struct rpc_task *task)
         if (status == -EAGAIN) {
                 if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) {
                         /* Protect against races with xs_write_space */
-                        spin_lock_bh(&xprt->sock_lock);
+                        spin_lock_bh(&xprt->transport_lock);
                         /* Don't race with disconnect */
                         if (!xprt_connected(xprt))
                                 task->tk_status = -ENOTCONN;
@@ -315,7 +315,7 @@ static int xs_send_request(struct rpc_task *task)
                                 task->tk_timeout = req->rq_timeout;
                                 rpc_sleep_on(&xprt->pending, task, NULL, NULL);
                         }
-                        spin_unlock_bh(&xprt->sock_lock);
+                        spin_unlock_bh(&xprt->transport_lock);
                         return status;
                 }
                 /* Keep holding the socket if it is blocked */
@@ -415,7 +415,7 @@ static void xs_udp_data_ready(struct sock *sk, int len)
                 goto dropit;
 
         /* Look up and lock the request corresponding to the given XID */
-        spin_lock(&xprt->sock_lock);
+        spin_lock(&xprt->transport_lock);
         rovr = xprt_lookup_rqst(xprt, *xp);
         if (!rovr)
                 goto out_unlock;
@@ -436,7 +436,7 @@ static void xs_udp_data_ready(struct sock *sk, int len)
         xprt_complete_rqst(xprt, rovr, copied);
 
 out_unlock:
-        spin_unlock(&xprt->sock_lock);
+        spin_unlock(&xprt->transport_lock);
 dropit:
         skb_free_datagram(sk, skb);
 out:
@@ -531,13 +531,13 @@ static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc
         ssize_t r;
 
         /* Find and lock the request corresponding to this xid */
-        spin_lock(&xprt->sock_lock);
+        spin_lock(&xprt->transport_lock);
         req = xprt_lookup_rqst(xprt, xprt->tcp_xid);
         if (!req) {
                 xprt->tcp_flags &= ~XPRT_COPY_DATA;
                 dprintk("RPC: XID %08x request not found!\n",
                                 ntohl(xprt->tcp_xid));
-                spin_unlock(&xprt->sock_lock);
+                spin_unlock(&xprt->transport_lock);
                 return;
         }
 
@@ -597,7 +597,7 @@ out:
                                 req->rq_task->tk_pid);
                 xprt_complete_rqst(xprt, req, xprt->tcp_copied);
         }
-        spin_unlock(&xprt->sock_lock);
+        spin_unlock(&xprt->transport_lock);
         xs_tcp_check_recm(xprt);
 }
 
@@ -696,7 +696,7 @@ static void xs_tcp_state_change(struct sock *sk)
 
         switch (sk->sk_state) {
         case TCP_ESTABLISHED:
-                spin_lock_bh(&xprt->sock_lock);
+                spin_lock_bh(&xprt->transport_lock);
                 if (!xprt_test_and_set_connected(xprt)) {
                         /* Reset TCP record info */
                         xprt->tcp_offset = 0;
@@ -705,7 +705,7 @@ static void xs_tcp_state_change(struct sock *sk)
                         xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID;
                         rpc_wake_up(&xprt->pending);
                 }
-                spin_unlock_bh(&xprt->sock_lock);
+                spin_unlock_bh(&xprt->transport_lock);
                 break;
         case TCP_SYN_SENT:
         case TCP_SYN_RECV:
@@ -753,10 +753,10 @@ static void xs_write_space(struct sock *sk)
         if (!test_and_clear_bit(SOCK_NOSPACE, &sock->flags))
                 goto out;
 
-        spin_lock_bh(&xprt->sock_lock);
+        spin_lock_bh(&xprt->transport_lock);
         if (xprt->snd_task)
                 rpc_wake_up_task(xprt->snd_task);
-        spin_unlock_bh(&xprt->sock_lock);
+        spin_unlock_bh(&xprt->transport_lock);
 out:
         read_unlock(&sk->sk_callback_lock);
 }