| author | Chuck Lever <cel@netapp.com> | 2005-08-25 19:25:54 -0400 |
|---|---|---|
| committer | Trond Myklebust <Trond.Myklebust@netapp.com> | 2005-09-23 12:38:48 -0400 |
| commit | 555ee3af161b037865793bd4bebc06b58daafde6 (patch) | |
| tree | 4c72474dabffab62234db158e5b6c86ace4f9d09 /net/sunrpc/xprt.c | |
| parent | ed63c003701a314c4893c11eceb9d68f8f46c662 (diff) | |
[PATCH] RPC: clean up after nocong was removed
Clean-up: Move some macros that are specific to the Van Jacobson
implementation into xprt.c. Get rid of the cong_wait field in
rpc_xprt, which is no longer used. Get rid of xprt_clear_backlog.
Test-plan:
Compile with CONFIG_NFS enabled.
Signed-off-by: Chuck Lever <cel@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'net/sunrpc/xprt.c')

-rw-r--r--	net/sunrpc/xprt.c	29

1 file changed, 19 insertions, 10 deletions
```diff
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index e8d11bd6158e..0458319a1bdd 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -62,7 +62,23 @@ static inline void do_xprt_reserve(struct rpc_task *);
 static void xprt_connect_status(struct rpc_task *task);
 static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
 
-static int xprt_clear_backlog(struct rpc_xprt *xprt);
+/*
+ * The transport code maintains an estimate on the maximum number of out-
+ * standing RPC requests, using a smoothed version of the congestion
+ * avoidance implemented in 44BSD. This is basically the Van Jacobson
+ * congestion algorithm: If a retransmit occurs, the congestion window is
+ * halved; otherwise, it is incremented by 1/cwnd when
+ *
+ *	- a reply is received and
+ *	- a full number of requests are outstanding and
+ *	- the congestion window hasn't been updated recently.
+ */
+#define RPC_CWNDSHIFT		(8U)
+#define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)
+#define RPC_INITCWND		RPC_CWNDSCALE
+#define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)
+
+#define RPCXPRT_CONGESTED(xprt)	((xprt)->cong >= (xprt)->cwnd)
 
 /**
  * xprt_reserve_xprt - serialize write access to transports
@@ -850,7 +866,7 @@ void xprt_release(struct rpc_task *task)
 
 	spin_lock(&xprt->reserve_lock);
 	list_add(&req->rq_list, &xprt->free);
-	xprt_clear_backlog(xprt);
+	rpc_wake_up_next(&xprt->backlog);
 	spin_unlock(&xprt->reserve_lock);
 }
 
@@ -902,7 +918,6 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc
 
 	spin_lock_init(&xprt->transport_lock);
 	spin_lock_init(&xprt->reserve_lock);
-	init_waitqueue_head(&xprt->cong_wait);
 
 	INIT_LIST_HEAD(&xprt->free);
 	INIT_LIST_HEAD(&xprt->recv);
@@ -911,6 +926,7 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc
 	xprt->timer.function = xprt_init_autodisconnect;
 	xprt->timer.data = (unsigned long) xprt;
 	xprt->last_used = jiffies;
+	xprt->cwnd = RPC_INITCWND;
 
 	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
 	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
@@ -955,16 +971,9 @@ static void xprt_shutdown(struct rpc_xprt *xprt)
 	rpc_wake_up(&xprt->resend);
 	xprt_wake_pending_tasks(xprt, -EIO);
 	rpc_wake_up(&xprt->backlog);
-	wake_up(&xprt->cong_wait);
 	del_timer_sync(&xprt->timer);
 }
 
-static int xprt_clear_backlog(struct rpc_xprt *xprt) {
-	rpc_wake_up_next(&xprt->backlog);
-	wake_up(&xprt->cong_wait);
-	return 1;
-}
-
 /**
  * xprt_destroy - destroy an RPC transport, killing off all requests.
  * @xprt: transport to destroy
```
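For readers less familiar with the scaled arithmetic behind the macros moved in this patch: the congestion window is kept in fixed point, scaled by RPC_CWNDSCALE (256), so one "request slot" equals RPC_CWNDSCALE. Halving on a retransmit is then a right shift, and the additive increase of 1/cwnd per reply becomes roughly `cwnd += RPC_CWNDSCALE * RPC_CWNDSCALE / cwnd`. The snippet below is a minimal userspace sketch of that arithmetic only; `toy_xprt` and `toy_adjust_cwnd()` are illustrative stand-ins, not the kernel's `rpc_xprt` or its adjustment routine, and it omits the gating conditions listed in the comment above (a full window of requests outstanding, window not updated recently).

```c
/* Standalone sketch of the scaled congestion-window arithmetic implied by
 * RPC_CWNDSHIFT/RPC_CWNDSCALE/RPC_MAXCWND.  The struct and update function
 * are simplified stand-ins, not kernel code. */
#include <stdio.h>

#define RPC_CWNDSHIFT		(8U)
#define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)
#define RPC_INITCWND		RPC_CWNDSCALE
#define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)
#define RPCXPRT_CONGESTED(xprt)	((xprt)->cong >= (xprt)->cwnd)

struct toy_xprt {
	unsigned long cwnd;	/* congestion window, scaled by RPC_CWNDSCALE */
	unsigned long cong;	/* current congestion, scaled likewise */
	unsigned int  max_reqs;	/* hard cap on outstanding requests */
};

/* Van Jacobson-style update: halve on a retransmit timeout, otherwise grow
 * by about 1/cwnd of a request per acknowledged reply. */
static void toy_adjust_cwnd(struct toy_xprt *xprt, int timed_out)
{
	unsigned long cwnd = xprt->cwnd;

	if (timed_out) {
		cwnd >>= 1;			/* multiplicative decrease */
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;	/* never below one request */
	} else {
		/* additive increase: += CWNDSCALE^2/cwnd == +1/cwnd requests,
		 * with (cwnd >> 1) added so the division rounds to nearest */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
	}
	xprt->cwnd = cwnd;
}

int main(void)
{
	struct toy_xprt xprt = { .cwnd = RPC_INITCWND, .cong = 0, .max_reqs = 16 };

	for (int i = 0; i < 5; i++) {
		toy_adjust_cwnd(&xprt, 0);	/* five replies received */
		printf("after reply %d: cwnd = %lu (%lu requests)\n",
		       i + 1, xprt.cwnd, xprt.cwnd / RPC_CWNDSCALE);
	}
	toy_adjust_cwnd(&xprt, 1);		/* one retransmit timeout */
	printf("after timeout: cwnd = %lu (%lu requests)\n",
	       xprt.cwnd, xprt.cwnd / RPC_CWNDSCALE);
	printf("congested? %s\n", RPCXPRT_CONGESTED(&xprt) ? "yes" : "no");
	return 0;
}
```

Keeping cwnd pre-scaled is what lets the per-reply increase stay smaller than one request without floating point, and it is why RPC_INITCWND is RPC_CWNDSCALE (one request's worth) rather than 1.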