author     Trond Myklebust <Trond.Myklebust@netapp.com>   2006-07-05 13:13:03 -0400
committer  Trond Myklebust <Trond.Myklebust@netapp.com>   2006-07-05 13:13:03 -0400
commit     5e66dd6d66ffe758b39b6dcadf2330753ee1159b (patch)
tree       a72cdcff4448e4af9425cc213ddf56ab23e697fe /net/ipv4
parent     026477c1141b67e98e3bd8bdedb7d4b88a3ecd09 (diff)
parent     ca78f6baca863afe2e6a244a0fe94b3a70211d46 (diff)
Merge branch 'master' of /home/trondmy/kernel/linux-2.6/
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/af_inet.c       | 14
-rw-r--r--  net/ipv4/route.c         | 26
-rw-r--r--  net/ipv4/tcp.c           | 13
-rw-r--r--  net/ipv4/tcp_ipv4.c      |  4
-rw-r--r--  net/ipv4/tcp_minisocks.c |  2
5 files changed, 42 insertions, 17 deletions
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 8d157157bf8e..318d4674faa1 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1106,7 +1106,15 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
 	int ihl;
 	int id;
 
-	if (!pskb_may_pull(skb, sizeof(*iph)))
+	if (unlikely(skb_shinfo(skb)->gso_type &
+		     ~(SKB_GSO_TCPV4 |
+		       SKB_GSO_UDP |
+		       SKB_GSO_DODGY |
+		       SKB_GSO_TCP_ECN |
+		       0)))
+		goto out;
+
+	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
 		goto out;
 
 	iph = skb->nh.iph;
@@ -1114,7 +1122,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
 	if (ihl < sizeof(*iph))
 		goto out;
 
-	if (!pskb_may_pull(skb, ihl))
+	if (unlikely(!pskb_may_pull(skb, ihl)))
 		goto out;
 
 	skb->h.raw = __skb_pull(skb, ihl);
@@ -1125,7 +1133,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
 
 	rcu_read_lock();
 	ops = rcu_dereference(inet_protos[proto]);
-	if (ops && ops->gso_segment)
+	if (likely(ops && ops->gso_segment))
 		segs = ops->gso_segment(skb, features);
 	rcu_read_unlock();
 
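Aside on the first hunk: the added test is an allowlist mask that rejects any skb whose gso_type carries a feature bit this path cannot segment. A minimal userspace sketch of the same idiom follows; the bit values and names are placeholders for illustration, not the kernel's real SKB_GSO_* definitions.

/* Illustrative sketch of the allowlist-mask check used in
 * inet_gso_segment(); the GSO_* values are invented for this example. */
#include <stdio.h>

#define GSO_TCPV4   (1 << 0)
#define GSO_UDP     (1 << 1)
#define GSO_DODGY   (1 << 2)
#define GSO_TCP_ECN (1 << 3)
#define GSO_TCPV6   (1 << 4)

static int gso_type_supported(unsigned int gso_type)
{
	/* Any bit outside the OR'd allowlist makes the expression
	 * non-zero, so the packet is rejected. */
	return !(gso_type & ~(GSO_TCPV4 |
			      GSO_UDP |
			      GSO_DODGY |
			      GSO_TCP_ECN |
			      0));
}

int main(void)
{
	printf("%d\n", gso_type_supported(GSO_TCPV4));               /* 1: allowed */
	printf("%d\n", gso_type_supported(GSO_TCPV4 | GSO_TCP_ECN)); /* 1: allowed */
	printf("%d\n", gso_type_supported(GSO_TCPV6));               /* 0: not handled here */
	return 0;
}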
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index da44fabf4dc5..2dc6dbb28467 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -205,21 +205,27 @@ __u8 ip_tos2prio[16] = {
 struct rt_hash_bucket {
 	struct rtable	*chain;
 };
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
+	defined(CONFIG_PROVE_LOCKING)
 /*
  * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks
  * The size of this table is a power of two and depends on the number of CPUS.
+ * (on lockdep we have a quite big spinlock_t, so keep the size down there)
  */
-#if NR_CPUS >= 32
-#define RT_HASH_LOCK_SZ	4096
-#elif NR_CPUS >= 16
-#define RT_HASH_LOCK_SZ	2048
-#elif NR_CPUS >= 8
-#define RT_HASH_LOCK_SZ	1024
-#elif NR_CPUS >= 4
-#define RT_HASH_LOCK_SZ	512
+#ifdef CONFIG_LOCKDEP
+# define RT_HASH_LOCK_SZ	256
 #else
-#define RT_HASH_LOCK_SZ	256
+# if NR_CPUS >= 32
+#  define RT_HASH_LOCK_SZ	4096
+# elif NR_CPUS >= 16
+#  define RT_HASH_LOCK_SZ	2048
+# elif NR_CPUS >= 8
+#  define RT_HASH_LOCK_SZ	1024
+# elif NR_CPUS >= 4
+#  define RT_HASH_LOCK_SZ	512
+# else
+#  define RT_HASH_LOCK_SZ	256
+# endif
 #endif
 
 static spinlock_t	*rt_hash_locks;
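Aside on the hunk above: RT_HASH_LOCK_SZ sizes a striped-lock table, where many hash buckets share one spinlock picked by the low bits of the bucket hash; lockdep's larger spinlock_t is why the table is capped at 256 under CONFIG_LOCKDEP. A rough userspace sketch of lock striping follows, assuming a power-of-two table size; the names (LOCK_SZ, stripe_lock_for) are invented for the example.

/* Sketch of lock striping: one lock guards a whole set of buckets,
 * selected by masking the bucket hash with (table size - 1). */
#include <pthread.h>

#define LOCK_SZ 256			/* must stay a power of two */

static pthread_mutex_t stripe_locks[LOCK_SZ];

static void stripe_locks_init(void)
{
	for (int i = 0; i < LOCK_SZ; i++)
		pthread_mutex_init(&stripe_locks[i], NULL);
}

static pthread_mutex_t *stripe_lock_for(unsigned int bucket_hash)
{
	/* Power-of-two size lets a mask replace the modulo. */
	return &stripe_locks[bucket_hash & (LOCK_SZ - 1)];
}

int main(void)
{
	stripe_locks_init();
	pthread_mutex_t *l = stripe_lock_for(0xdeadbeef);
	pthread_mutex_lock(l);
	/* ... walk/modify the buckets guarded by this stripe ... */
	pthread_mutex_unlock(l);
	return 0;
}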
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 804458712d88..f6a2d9223d07 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2170,8 +2170,19 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 
 	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
 		/* Packet is from an untrusted source, reset gso_segs. */
-		int mss = skb_shinfo(skb)->gso_size;
+		int type = skb_shinfo(skb)->gso_type;
+		int mss;
+
+		if (unlikely(type &
+			     ~(SKB_GSO_TCPV4 |
+			       SKB_GSO_DODGY |
+			       SKB_GSO_TCP_ECN |
+			       SKB_GSO_TCPV6 |
+			       0) ||
+			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
+			goto out;
 
+		mss = skb_shinfo(skb)->gso_size;
 		skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss;
 
 		segs = NULL;
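Aside: once the gso_type bits pass the check, gso_segs is recomputed as a plain ceiling division of the payload length by the MSS. A standalone illustration with arbitrary example values:

/* (len + mss - 1) / mss == ceil(len / mss); values below are arbitrary. */
#include <stdio.h>

static unsigned int gso_segs_for(unsigned int len, unsigned int mss)
{
	return (len + mss - 1) / mss;
}

int main(void)
{
	printf("%u\n", gso_segs_for(4000, 1448));	/* 3 segments */
	printf("%u\n", gso_segs_for(1448, 1448));	/* exactly 1 */
	return 0;
}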
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 8355b729fa95..5a886e6efbbe 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -90,7 +90,7 @@ static struct socket *tcp_socket;
 void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);
 
 struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
-	.lhash_lock  = RW_LOCK_UNLOCKED,
+	.lhash_lock  = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock),
 	.lhash_users = ATOMIC_INIT(0),
 	.lhash_wait  = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
 };
@@ -1090,7 +1090,7 @@ process:
 
 	skb->dev = NULL;
 
-	bh_lock_sock(sk);
+	bh_lock_sock_nested(sk);
 	ret = 0;
 	if (!sock_owned_by_user(sk)) {
 #ifdef CONFIG_NET_DMA
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index e0851697ad5e..0ccb7cb22b15 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -40,7 +40,7 @@ int sysctl_tcp_abort_on_overflow;
 struct inet_timewait_death_row tcp_death_row = {
 	.sysctl_max_tw_buckets = NR_FILE * 2,
 	.period		= TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
-	.death_lock	= SPIN_LOCK_UNLOCKED,
+	.death_lock	= __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
 	.hashinfo	= &tcp_hashinfo,
 	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
 					    (unsigned long)&tcp_death_row),
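Aside on the two initializer hunks (tcp_ipv4.c and tcp_minisocks.c): __RW_LOCK_UNLOCKED() and __SPIN_LOCK_UNLOCKED() take the lock variable itself, so each statically initialized lock gets its own identity for lockdep, unlike the anonymous RW_LOCK_UNLOCKED/SPIN_LOCK_UNLOCKED initializers they replace. A loose userspace analogy follows; the named_lock struct and NAMED_LOCK_UNLOCKED macro are invented purely for illustration.

/* Analogy only: a lock that carries its own name, so a debugging layer
 * can tell statically initialized locks apart. */
#include <stdio.h>
#include <pthread.h>

struct named_lock {
	pthread_mutex_t mutex;
	const char *name;	/* stands in for lockdep's per-lock class */
};

#define NAMED_LOCK_UNLOCKED(l) \
	{ .mutex = PTHREAD_MUTEX_INITIALIZER, .name = #l }

static struct named_lock death_lock = NAMED_LOCK_UNLOCKED(death_lock);
static struct named_lock lhash_lock = NAMED_LOCK_UNLOCKED(lhash_lock);

int main(void)
{
	/* Each lock reports its own identity rather than a shared one. */
	printf("%s %s\n", death_lock.name, lhash_lock.name);
	return 0;
}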