Diffstat (limited to 'net/ipv4')

 net/ipv4/tcp.c           | 10 +++++-----
 net/ipv4/tcp_ipv4.c      |  7 ++++---
 net/ipv4/tcp_minisocks.c |  2 +-
 net/ipv4/tcp_output.c    |  5 +++--
 4 files changed, 13 insertions(+), 11 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 59f69a6c5863..edeea060db44 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1839,7 +1839,7 @@ void tcp_close(struct sock *sk, long timeout)
 		/* Unread data was tossed, zap the connection. */
 		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
 		tcp_set_state(sk, TCP_CLOSE);
-		tcp_send_active_reset(sk, GFP_KERNEL);
+		tcp_send_active_reset(sk, sk->sk_allocation);
 	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
 		/* Check zero linger _after_ checking for unread data. */
 		sk->sk_prot->disconnect(sk, 0);
@@ -2658,7 +2658,7 @@ void tcp_free_md5sig_pool(void)
 
 EXPORT_SYMBOL(tcp_free_md5sig_pool);
 
-static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
+static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(struct sock *sk)
 {
 	int cpu;
 	struct tcp_md5sig_pool **pool;
@@ -2671,7 +2671,7 @@ static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
 		struct tcp_md5sig_pool *p;
 		struct crypto_hash *hash;
 
-		p = kzalloc(sizeof(*p), GFP_KERNEL);
+		p = kzalloc(sizeof(*p), sk->sk_allocation);
 		if (!p)
 			goto out_free;
 		*per_cpu_ptr(pool, cpu) = p;
@@ -2688,7 +2688,7 @@ out_free:
 	return NULL;
 }
 
-struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
+struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *sk)
 {
 	struct tcp_md5sig_pool **pool;
 	int alloc = 0;
@@ -2709,7 +2709,7 @@ retry:
 
 	if (alloc) {
 		/* we cannot hold spinlock here because this may sleep. */
-		struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
+		struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool(sk);
 		spin_lock_bh(&tcp_md5sig_pool_lock);
 		if (!p) {
 			tcp_md5sig_users--;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ce7d3b021ffc..0543561da999 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -886,7 +886,7 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
 		}
 		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
 	}
-	if (tcp_alloc_md5sig_pool() == NULL) {
+	if (tcp_alloc_md5sig_pool(sk) == NULL) {
 		kfree(newkey);
 		return -ENOMEM;
 	}
@@ -1007,8 +1007,9 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
 
 	if (!tcp_sk(sk)->md5sig_info) {
 		struct tcp_sock *tp = tcp_sk(sk);
-		struct tcp_md5sig_info *p = kzalloc(sizeof(*p), GFP_KERNEL);
+		struct tcp_md5sig_info *p;
 
+		p = kzalloc(sizeof(*p), sk->sk_allocation);
 		if (!p)
 			return -EINVAL;
 
@@ -1016,7 +1017,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
 		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
 	}
 
-	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
+	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
 	if (!newkey)
 		return -ENOMEM;
 	return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 6c8b42299d9f..e48c37d74d77 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -322,7 +322,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 			if (key != NULL) {
 				memcpy(&tcptw->tw_md5_key, key->key, key->keylen);
 				tcptw->tw_md5_keylen = key->keylen;
-				if (tcp_alloc_md5sig_pool() == NULL)
+				if (tcp_alloc_md5sig_pool(sk) == NULL)
 					BUG();
 			}
 		} while (0);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 4e004424d400..5200aab0ca97 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2135,7 +2135,8 @@ void tcp_send_fin(struct sock *sk)
 	} else {
 		/* Socket is locked, keep trying until memory is available. */
 		for (;;) {
-			skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL);
+			skb = alloc_skb_fclone(MAX_TCP_HEADER,
+					       sk->sk_allocation);
 			if (skb)
 				break;
 			yield();
@@ -2388,7 +2389,7 @@ int tcp_connect(struct sock *sk)
 	sk->sk_wmem_queued += buff->truesize;
 	sk_mem_charge(sk, buff->truesize);
 	tp->packets_out += tcp_skb_pcount(buff);
-	tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
+	tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
 
 	/* We change tp->snd_nxt after the tcp_transmit_skb() call
 	 * in order to make this packet get counted in tcpOutSegs.
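Every hunk above applies the same substitution: an allocation or transmit done on behalf of a specific socket passes that socket's sk->sk_allocation GFP mask instead of a hard-coded GFP_KERNEL, so whatever constraints the socket carries (GFP_ATOMIC, __GFP_NOFS, and so on) cover all allocations made for it. The default mask is still GFP_KERNEL, so sockets that never override sk_allocation behave as before. A minimal sketch of the pattern follows; the demo_state structure and demo_state_alloc() helper are hypothetical and not part of this patch.

/*
 * Hypothetical sketch (not from the patch): per-socket state allocated
 * with the socket's own GFP mask rather than a hard-coded GFP_KERNEL.
 */
#include <linux/slab.h>
#include <net/sock.h>

struct demo_state {
	u32 cookie;		/* placeholder per-socket data */
};

static struct demo_state *demo_state_alloc(struct sock *sk)
{
	/* kzalloc(..., GFP_KERNEL) here would ignore the socket's constraints. */
	return kzalloc(sizeof(struct demo_state), sk->sk_allocation);
}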