author     Tom Tucker <tom@opengridcomputing.com>       2007-12-30 22:07:40 -0500
committer  J. Bruce Fields <bfields@citi.umich.edu>     2008-02-01 16:42:09 -0500
commit     f9f3cc4fae04c87c815a4b473fb577cf74ef27da (patch)
tree       906550a4f9db0bf79adea43b3b9ac1fbc2b2c4e6 /net/sunrpc/svcsock.c
parent     44a6995b32eb9b021ee71b279edb84728c9f5160 (diff)
svc: Move connection limit checking to its own function
Move the code that poaches connections when the connection limit is hit
to a subroutine to make the accept logic path easier to follow. Since this
is in the new connection path, it should not be a performance issue.
Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Acked-by: Neil Brown <neilb@suse.de>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
Diffstat (limited to 'net/sunrpc/svcsock.c')
 -rw-r--r--   net/sunrpc/svcsock.c   57
1 files changed, 29 insertions, 28 deletions
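
Before reading the diff, it may help to see the heuristic that svc_check_conn_limits() now encapsulates: the server tolerates at most (sv_nrthreads + 3) * 20 temporary (TCP) sockets before one is poached. The following is a minimal userspace sketch of that arithmetic only; struct fake_serv and over_connection_limit() are made-up stand-ins for the two svc_serv fields the check reads, not kernel code.

/* conn_limit_sketch.c - standalone illustration of the threshold used by
 * svc_check_conn_limits() in the diff below; not the kernel implementation. */
#include <stdbool.h>
#include <stdio.h>

struct fake_serv {
	int sv_nrthreads;	/* number of nfsd threads */
	int sv_tmpcnt;		/* count of temporary (TCP) sockets */
};

/* Mirrors the condition at the top of svc_check_conn_limits(). */
static bool over_connection_limit(const struct fake_serv *serv)
{
	return serv->sv_tmpcnt > (serv->sv_nrthreads + 3) * 20;
}

int main(void)
{
	/* With 8 threads the limit is (8 + 3) * 20 = 220 sockets, so the
	 * 221st temporary connection would trigger the poaching path. */
	struct fake_serv serv = { .sv_nrthreads = 8, .sv_tmpcnt = 221 };

	printf("limit=%d over=%s\n", (serv.sv_nrthreads + 3) * 20,
	       over_connection_limit(&serv) ? "yes" : "no");
	return 0;
}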
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 962dbf43a728..6e9dc8f96495 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1105,17 +1105,30 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
 
 	svc_sock_received(newsvsk);
 
-	/* make sure that we don't have too many active connections.
-	 * If we have, something must be dropped.
-	 *
-	 * There's no point in trying to do random drop here for
-	 * DoS prevention. The NFS clients does 1 reconnect in 15
-	 * seconds. An attacker can easily beat that.
-	 *
-	 * The only somewhat efficient mechanism would be if drop
-	 * old connections from the same IP first. But right now
-	 * we don't even record the client IP in svc_sock.
-	 */
+	if (serv->sv_stats)
+		serv->sv_stats->nettcpconn++;
+
+	return &newsvsk->sk_xprt;
+
+failed:
+	sock_release(newsock);
+	return NULL;
+}
+
+/*
+ * Make sure that we don't have too many active connections. If we
+ * have, something must be dropped.
+ *
+ * There's no point in trying to do random drop here for DoS
+ * prevention. The NFS clients does 1 reconnect in 15 seconds. An
+ * attacker can easily beat that.
+ *
+ * The only somewhat efficient mechanism would be if drop old
+ * connections from the same IP first. But right now we don't even
+ * record the client IP in svc_sock.
+ */
+static void svc_check_conn_limits(struct svc_serv *serv)
+{
 	if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
 		struct svc_sock *svsk = NULL;
 		spin_lock_bh(&serv->sv_lock);
@@ -1123,13 +1136,9 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
 			if (net_ratelimit()) {
 				/* Try to help the admin */
 				printk(KERN_NOTICE "%s: too many open TCP "
 				       "sockets, consider increasing the "
 				       "number of nfsd threads\n",
 				       serv->sv_name);
-				printk(KERN_NOTICE
-				       "%s: last TCP connect from %s\n",
-				       serv->sv_name, __svc_print_addr(sin,
-						       buf, sizeof(buf)));
 			}
 			/*
 			 * Always select the oldest socket. It's not fair,
@@ -1147,17 +1156,7 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
 			svc_sock_enqueue(svsk);
 			svc_sock_put(svsk);
 		}
-
 	}
-
-	if (serv->sv_stats)
-		serv->sv_stats->nettcpconn++;
-
-	return &newsvsk->sk_xprt;
-
-failed:
-	sock_release(newsock);
-	return NULL;
 }
 
 /*
@@ -1574,6 +1573,8 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
 	} else if (test_bit(SK_LISTENER, &svsk->sk_flags)) {
 		struct svc_xprt *newxpt;
 		newxpt = svsk->sk_xprt.xpt_ops->xpo_accept(&svsk->sk_xprt);
+		if (newxpt)
+			svc_check_conn_limits(svsk->sk_server);
 		svc_sock_received(svsk);
 	} else {
 		dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n",
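
The comment block retained in the hunks above spells out the eviction policy: random drop buys little against DoS because an NFS client reconnects within roughly 15 seconds, so the oldest temporary socket is the one poached. The sketch below models that oldest-first choice in plain userspace C; the struct, array, and function names are invented for illustration and only echo the policy described in the diff, not the kernel's list handling.

/* oldest_first_sketch.c - illustrative model of the "always select the
 * oldest socket" policy described in the patch; not the kernel code. */
#include <stdio.h>

#define NCONNS 5

struct conn {
	int accept_seq;		/* lower value = accepted earlier = older */
};

/* Return the index of the connection accepted earliest; this is the one
 * an oldest-first policy would close when the server is over its limit. */
static int pick_oldest(const struct conn *conns, int n)
{
	int oldest = 0;

	for (int i = 1; i < n; i++)
		if (conns[i].accept_seq < conns[oldest].accept_seq)
			oldest = i;
	return oldest;
}

int main(void)
{
	struct conn conns[NCONNS] = {
		{ .accept_seq = 14 }, { .accept_seq = 3 }, { .accept_seq = 27 },
		{ .accept_seq = 9 }, { .accept_seq = 21 },
	};

	/* The connection with accept_seq == 3 is the oldest, so it is the
	 * one poached first. */
	printf("poach connection with accept_seq=%d\n",
	       conns[pick_oldest(conns, NCONNS)].accept_seq);
	return 0;
}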