author     Herbert Xu <herbert@gondor.apana.org.au>       2007-06-10 20:33:08 -0400
committer  David S. Miller <davem@sunset.davemloft.net>   2007-07-11 01:06:20 -0400
commit     a7ab4b501f9b8a9dc4d5cee542db67b6ccd1088b
tree       f84fa3241fddf8ed8320ace50fed11170ce7860b /net/ipv4
parent     c716a81ab946c68a8d84022ee32eb14674e72650
[TCPv4]: Improve BH latency in /proc/net/tcp
Currently the code for /proc/net/tcp disables BH while iterating over the
entire established hash table. Even though we call cond_resched_softirq
for each entry, we still won't process softirqs as regularly as we would
otherwise do, which results in poor performance when the system is loaded
near capacity.

This anomaly comes from the 2.4 code, where this was all in a single
function and the local_bh_disable might have made sense as a small
optimisation.

The cost of each local_bh_disable is so small when compared against the
increased latency in keeping it disabled over a large but mostly empty
TCP established hash table that we should just move it to the individual
read_lock/read_unlock calls as we do in inet_diag.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
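To make the latency argument concrete, here is a minimal userspace sketch of the pattern the patch adopts; it is an analogy, not kernel code. SIGALRM stands in for softirqs and sigprocmask() for local_bh_disable()/local_bh_enable(); bh_disable(), bh_enable(), scan_bucket() and NBUCKETS are hypothetical names invented for this illustration. Both variants spend the same total time with the "softirq" blocked, but the per-bucket variant, which mirrors the read_lock_bh()/read_unlock_bh() pattern, reopens a delivery window on every bucket boundary.

/* softirq_latency_sketch.c -- userspace analogy for this patch.
 * Build: cc -O2 -o sketch softirq_latency_sketch.c -lpthread
 */
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>

#define NBUCKETS 64                     /* stand-in for ehash_size */

static pthread_rwlock_t bucket_lock[NBUCKETS];
static volatile sig_atomic_t ticks;     /* "softirqs" actually serviced */

static void on_alarm(int sig) { (void)sig; ticks++; }

static void bh_disable(void)            /* analogue of local_bh_disable() */
{
	sigset_t s;
	sigemptyset(&s);
	sigaddset(&s, SIGALRM);
	sigprocmask(SIG_BLOCK, &s, NULL);
}

static void bh_enable(void)             /* analogue of local_bh_enable() */
{
	sigset_t s;
	sigemptyset(&s);
	sigaddset(&s, SIGALRM);
	sigprocmask(SIG_UNBLOCK, &s, NULL);
}

static void scan_bucket(int i)          /* pretend to walk one hash chain */
{
	(void)i;
	usleep(2000);
}

static int run_scan(int coarse)
{
	int i, start = ticks;

	if (coarse)
		bh_disable();           /* old code: one BH-off window spans all buckets */
	for (i = 0; i < NBUCKETS; i++) {
		if (!coarse)
			bh_disable();   /* new code: the read_lock_bh() pattern... */
		pthread_rwlock_rdlock(&bucket_lock[i]);
		scan_bucket(i);
		pthread_rwlock_unlock(&bucket_lock[i]);
		if (!coarse)
			bh_enable();    /* ...and read_unlock_bh(): pending work runs here */
	}
	if (coarse)
		bh_enable();

	return ticks - start;
}

int main(void)
{
	struct sigaction sa;
	struct itimerval it = { { 0, 1000 }, { 0, 1000 } }; /* 1 kHz "softirq" source */
	int i;

	for (i = 0; i < NBUCKETS; i++)
		pthread_rwlock_init(&bucket_lock[i], NULL);

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_alarm;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGALRM, &sa, NULL);
	setitimer(ITIMER_REAL, &it, NULL);

	printf("coarse BH-off window: %d ticks serviced\n", run_scan(1));
	printf("per-bucket BH-off:    %d ticks serviced\n", run_scan(0));
	return 0;
}

Because standard signals do not queue, the coarse scan typically services only a tick or two no matter how long it runs, while the per-bucket scan services roughly one per bucket. Scaled up to a large, mostly empty established hash table, that difference is the softirq latency this patch removes.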
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/tcp_ipv4.c  19
1 file changed, 5 insertions(+), 14 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 354721d67f6..3f5f7423b95 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2045,10 +2045,7 @@ static void *established_get_first(struct seq_file *seq)
 		struct hlist_node *node;
 		struct inet_timewait_sock *tw;
 
-		/* We can reschedule _before_ having picked the target: */
-		cond_resched_softirq();
-
-		read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
+		read_lock_bh(&tcp_hashinfo.ehash[st->bucket].lock);
 		sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
 			if (sk->sk_family != st->family) {
 				continue;
@@ -2065,7 +2062,7 @@ static void *established_get_first(struct seq_file *seq)
 			rc = tw;
 			goto out;
 		}
-		read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
+		read_unlock_bh(&tcp_hashinfo.ehash[st->bucket].lock);
 		st->state = TCP_SEQ_STATE_ESTABLISHED;
 	}
 out:
@@ -2092,14 +2089,11 @@ get_tw:
 		cur = tw;
 		goto out;
 	}
-	read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
+	read_unlock_bh(&tcp_hashinfo.ehash[st->bucket].lock);
 	st->state = TCP_SEQ_STATE_ESTABLISHED;
 
-	/* We can reschedule between buckets: */
-	cond_resched_softirq();
-
 	if (++st->bucket < tcp_hashinfo.ehash_size) {
-		read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
+		read_lock_bh(&tcp_hashinfo.ehash[st->bucket].lock);
 		sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
 	} else {
 		cur = NULL;
@@ -2144,7 +2138,6 @@ static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
 
 	if (!rc) {
 		inet_listen_unlock(&tcp_hashinfo);
-		local_bh_disable();
 		st->state = TCP_SEQ_STATE_ESTABLISHED;
 		rc = established_get_idx(seq, pos);
 	}
@@ -2177,7 +2170,6 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 		rc = listening_get_next(seq, v);
 		if (!rc) {
 			inet_listen_unlock(&tcp_hashinfo);
-			local_bh_disable();
 			st->state = TCP_SEQ_STATE_ESTABLISHED;
 			rc = established_get_first(seq);
 		}
@@ -2209,8 +2201,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
 	case TCP_SEQ_STATE_TIME_WAIT:
 	case TCP_SEQ_STATE_ESTABLISHED:
 		if (v)
-			read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
-		local_bh_enable();
+			read_unlock_bh(&tcp_hashinfo.ehash[st->bucket].lock);
 		break;
 	}
 }