Diffstat (limited to 'net/ipv4/tcp.c'):
 net/ipv4/tcp.c | 156
 1 file changed, 148 insertions(+), 8 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c05e8edaf544..090c690627e5 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -258,6 +258,7 @@
 #include <linux/bootmem.h>
 #include <linux/cache.h>
 #include <linux/err.h>
+#include <linux/crypto.h>
 
 #include <net/icmp.h>
 #include <net/tcp.h>
@@ -462,11 +463,12 @@ static inline int forced_push(struct tcp_sock *tp)
 static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
 			      struct sk_buff *skb)
 {
-	skb->csum = 0;
-	TCP_SKB_CB(skb)->seq = tp->write_seq;
-	TCP_SKB_CB(skb)->end_seq = tp->write_seq;
-	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
-	TCP_SKB_CB(skb)->sacked = 0;
+	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
+
+	skb->csum = 0;
+	tcb->seq = tcb->end_seq = tp->write_seq;
+	tcb->flags = TCPCB_FLAG_ACK;
+	tcb->sacked = 0;
 	skb_header_release(skb);
 	__skb_queue_tail(&sk->sk_write_queue, skb);
 	sk_charge_skb(sk, skb);
@@ -1942,6 +1944,13 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		}
 		break;
 
+#ifdef CONFIG_TCP_MD5SIG
+	case TCP_MD5SIG:
+		/* Read the IP->Key mappings from userspace */
+		err = tp->af_specific->md5_parse(sk, optval, optlen);
+		break;
+#endif
+
 	default:
 		err = -ENOPROTOOPT;
 		break;
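
Note: the new TCP_MD5SIG option is driven entirely from userspace. A minimal caller-side sketch, assuming the struct tcp_md5sig layout (tcpm_addr, tcpm_keylen, tcpm_key, with tcpm_keylen capped at TCP_MD5SIG_MAXKEYLEN) that this series exposes through the tcp uapi header; the helper name is illustrative, not part of the patch:

	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/tcp.h>

	/* Illustrative helper: attach an RFC 2385 key for one IPv4 peer
	 * before connect()/listen(). */
	static int set_tcp_md5_key(int fd, const struct sockaddr_in *peer,
				   const void *key, unsigned int keylen)
	{
		struct tcp_md5sig md5;

		memset(&md5, 0, sizeof(md5));
		memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
		md5.tcpm_keylen = keylen;	/* must be <= TCP_MD5SIG_MAXKEYLEN */
		memcpy(md5.tcpm_key, key, keylen);

		/* Lands in the TCP_MD5SIG case above, which forwards the
		 * address->key mapping to tp->af_specific->md5_parse(). */
		return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
	}
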
@@ -2154,7 +2163,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 	struct tcphdr *th;
 	unsigned thlen;
 	unsigned int seq;
-	unsigned int delta;
+	__be32 delta;
 	unsigned int oldlen;
 	unsigned int len;
 
@@ -2207,7 +2216,8 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 	do {
 		th->fin = th->psh = 0;
 
-		th->check = ~csum_fold(th->check + delta);
+		th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
+				       (__force u32)delta));
 		if (skb->ip_summed != CHECKSUM_PARTIAL)
 			th->check = csum_fold(csum_partial(skb->h.raw, thlen,
 							   skb->csum));
@@ -2221,7 +2231,8 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 	} while (skb->next);
 
 	delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
-	th->check = ~csum_fold(th->check + delta);
+	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
+			       (__force u32)delta));
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
 		th->check = csum_fold(csum_partial(skb->h.raw, thlen,
 						   skb->csum));
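
Note: the two th->check hunks patch the stored pseudo-header checksum in place after segmentation rewrites the length that feeds into it; the __force casts (and the unsigned int to __be32 change for delta above) only silence sparse's endianness checking and do not alter the arithmetic. A small userspace sketch of the same fold-and-patch step, assuming the usual csum_fold() semantics (fold the 32-bit sum into 16 bits, then complement):

	#include <stdint.h>

	/* Mirror of csum_fold(): fold carries into 16 bits, complement. */
	static uint16_t csum_fold32(uint32_t sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);	/* fold high half */
		sum = (sum & 0xffff) + (sum >> 16);	/* absorb final carry */
		return (uint16_t)~sum;
	}

	/* "check = ~csum_fold(check + delta)": add the 32-bit delta for the
	 * changed length field into the stored sum and refold, instead of
	 * recomputing the checksum from scratch. */
	static uint16_t patch_check(uint16_t check, uint32_t delta)
	{
		return (uint16_t)~csum_fold32((uint32_t)check + delta);
	}
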
@@ -2231,6 +2242,135 @@ out:
 }
 EXPORT_SYMBOL(tcp_tso_segment);
 
+#ifdef CONFIG_TCP_MD5SIG
+static unsigned long tcp_md5sig_users;
+static struct tcp_md5sig_pool **tcp_md5sig_pool;
+static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
+
+static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
+{
+	int cpu;
+	for_each_possible_cpu(cpu) {
+		struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
+		if (p) {
+			if (p->md5_desc.tfm)
+				crypto_free_hash(p->md5_desc.tfm);
+			kfree(p);
+			p = NULL;
+		}
+	}
+	free_percpu(pool);
+}
+
+void tcp_free_md5sig_pool(void)
+{
+	struct tcp_md5sig_pool **pool = NULL;
+
+	spin_lock(&tcp_md5sig_pool_lock);
+	if (--tcp_md5sig_users == 0) {
+		pool = tcp_md5sig_pool;
+		tcp_md5sig_pool = NULL;
+	}
+	spin_unlock(&tcp_md5sig_pool_lock);
+	if (pool)
+		__tcp_free_md5sig_pool(pool);
+}
+
+EXPORT_SYMBOL(tcp_free_md5sig_pool);
+
+static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
+{
+	int cpu;
+	struct tcp_md5sig_pool **pool;
+
+	pool = alloc_percpu(struct tcp_md5sig_pool *);
+	if (!pool)
+		return NULL;
+
+	for_each_possible_cpu(cpu) {
+		struct tcp_md5sig_pool *p;
+		struct crypto_hash *hash;
+
+		p = kzalloc(sizeof(*p), GFP_KERNEL);
+		if (!p)
+			goto out_free;
+		*per_cpu_ptr(pool, cpu) = p;
+
+		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+		if (!hash || IS_ERR(hash))
+			goto out_free;
+
+		p->md5_desc.tfm = hash;
+	}
+	return pool;
+out_free:
+	__tcp_free_md5sig_pool(pool);
+	return NULL;
+}
+
+struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
+{
+	struct tcp_md5sig_pool **pool;
+	int alloc = 0;
+
+retry:
+	spin_lock(&tcp_md5sig_pool_lock);
+	pool = tcp_md5sig_pool;
+	if (tcp_md5sig_users++ == 0) {
+		alloc = 1;
+		spin_unlock(&tcp_md5sig_pool_lock);
+	} else if (!pool) {
+		tcp_md5sig_users--;
+		spin_unlock(&tcp_md5sig_pool_lock);
+		cpu_relax();
+		goto retry;
+	} else
+		spin_unlock(&tcp_md5sig_pool_lock);
+
+	if (alloc) {
+		/* we cannot hold spinlock here because this may sleep. */
+		struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
+		spin_lock(&tcp_md5sig_pool_lock);
+		if (!p) {
+			tcp_md5sig_users--;
+			spin_unlock(&tcp_md5sig_pool_lock);
+			return NULL;
+		}
+		pool = tcp_md5sig_pool;
+		if (pool) {
+			/* oops, it has already been assigned. */
+			spin_unlock(&tcp_md5sig_pool_lock);
+			__tcp_free_md5sig_pool(p);
+		} else {
+			tcp_md5sig_pool = pool = p;
+			spin_unlock(&tcp_md5sig_pool_lock);
+		}
+	}
+	return pool;
+}
+
+EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
2353
2354struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
2355{
2356 struct tcp_md5sig_pool **p;
2357 spin_lock(&tcp_md5sig_pool_lock);
2358 p = tcp_md5sig_pool;
2359 if (p)
2360 tcp_md5sig_users++;
2361 spin_unlock(&tcp_md5sig_pool_lock);
2362 return (p ? *per_cpu_ptr(p, cpu) : NULL);
2363}
2364
2365EXPORT_SYMBOL(__tcp_get_md5sig_pool);
2366
+void __tcp_put_md5sig_pool(void)
+{
+	tcp_free_md5sig_pool();
+}
+
+EXPORT_SYMBOL(__tcp_put_md5sig_pool);
+#endif
+
 extern void __skb_cb_too_small_for_tcp(int, int);
 extern struct tcp_congestion_ops tcp_reno;
 
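
Note: taken together, the pool functions above form a reference-counted, per-cpu cache of MD5 transforms: tcp_alloc_md5sig_pool() allocates on first use, __tcp_get_md5sig_pool()/__tcp_put_md5sig_pool() bracket each hashing operation, and tcp_free_md5sig_pool() drops the long-lived reference. A sketch of the intended calling pattern; the surrounding function is hypothetical and only the pool calls come from this patch:

	/* Illustrative caller (not part of the patch) showing the pool
	 * lifecycle; a real user must also keep the CPU pinned (e.g.
	 * get_cpu()/put_cpu()) around the per-cpu access. */
	static int tcp_md5_pool_usage_sketch(void)
	{
		struct tcp_md5sig_pool *hp;

		/* First key installed: take the long-lived reference and
		 * allocate the per-cpu crypto contexts on demand. */
		if (!tcp_alloc_md5sig_pool())
			return -ENOMEM;

		/* Per segment: borrow this CPU's context (bumps the count)... */
		hp = __tcp_get_md5sig_pool(smp_processor_id());
		if (hp) {
			/* ...hash the pseudo-header and segment through
			 * hp->md5_desc with the crypto_hash_*() API here... */
			__tcp_put_md5sig_pool();	/* ...then drop that count. */
		}

		/* Last key removed: drop the long-lived reference; the final
		 * decrement frees the per-cpu pool. */
		tcp_free_md5sig_pool();
		return 0;
	}
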