Diffstat (limited to 'net/ipv4')
 net/ipv4/Makefile          |  2
 net/ipv4/sysctl_net_ipv4.c |  4
 net/ipv4/tcp_input.c       | 21
 net/ipv4/tcp_ipv4.c        |  2
 net/ipv4/tcp_output.c      | 12
 5 files changed, 22 insertions(+), 19 deletions(-)
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index ae2ccf2890e4..15ca63ec604e 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -49,7 +49,7 @@ obj-$(CONFIG_TCP_CONG_SCALABLE) += tcp_scalable.o
 obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
 obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
 obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
-obj-$(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) += tcp_memcontrol.o
+obj-$(CONFIG_MEMCG_KMEM) += tcp_memcontrol.o
 obj-$(CONFIG_NETLABEL) += cipso_ipv4.o

 obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 4b6487a68279..1b5ce96707a3 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -184,7 +184,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
         int ret;
         unsigned long vec[3];
         struct net *net = current->nsproxy->net_ns;
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_MEMCG_KMEM
         struct mem_cgroup *memcg;
 #endif

@@ -203,7 +203,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
         if (ret)
                 return ret;

-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_MEMCG_KMEM
         rcu_read_lock();
         memcg = mem_cgroup_from_task(current);

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9be30b039ae3..2fd2bc9e3c64 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4351,19 +4351,20 @@ static void tcp_ofo_queue(struct sock *sk)
 static bool tcp_prune_ofo_queue(struct sock *sk);
 static int tcp_prune_queue(struct sock *sk);

-static int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
+static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
+                                 unsigned int size)
 {
         if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-            !sk_rmem_schedule(sk, size)) {
+            !sk_rmem_schedule(sk, skb, size)) {

                 if (tcp_prune_queue(sk) < 0)
                         return -1;

-                if (!sk_rmem_schedule(sk, size)) {
+                if (!sk_rmem_schedule(sk, skb, size)) {
                         if (!tcp_prune_ofo_queue(sk))
                                 return -1;

-                        if (!sk_rmem_schedule(sk, size))
+                        if (!sk_rmem_schedule(sk, skb, size))
                                 return -1;
                 }
         }
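
The new skb argument lets the rmem accounting helper take per-skb state into account (in this series, whether the buffer came from the pfmemalloc emergency reserves). The skb-aware sk_rmem_schedule() itself lives in include/net/sock.h and is not part of this diff; a rough sketch of what it is expected to do, treating the exact body as an assumption:

static inline bool sk_rmem_schedule(struct sock *sk, struct sk_buff *skb,
                                    unsigned int size)
{
        if (!sk_has_account(sk))        /* protocol does no rmem accounting */
                return true;
        return size <= sk->sk_forward_alloc ||
               __sk_mem_schedule(sk, size, SK_MEM_RECV) ||
               skb_pfmemalloc(skb);     /* reserve-backed skbs are always admitted */
}
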
@@ -4418,7 +4419,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)

         TCP_ECN_check_ce(tp, skb);

-        if (unlikely(tcp_try_rmem_schedule(sk, skb->truesize))) {
+        if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
                 __kfree_skb(skb);
                 return;
@@ -4552,17 +4553,17 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int

 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
 {
-        struct sk_buff *skb;
+        struct sk_buff *skb = NULL;
         struct tcphdr *th;
         bool fragstolen;

-        if (tcp_try_rmem_schedule(sk, size + sizeof(*th)))
-                goto err;
-
         skb = alloc_skb(size + sizeof(*th), sk->sk_allocation);
         if (!skb)
                 goto err;

+        if (tcp_try_rmem_schedule(sk, skb, size + sizeof(*th)))
+                goto err_free;
+
         th = (struct tcphdr *)skb_put(skb, sizeof(*th));
         skb_reset_transport_header(skb);
         memset(th, 0, sizeof(*th));
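
Because tcp_try_rmem_schedule() now needs the skb, tcp_send_rcvq() allocates the buffer first and charges it against the receive budget afterwards, so a scheduling failure must release the skb it just allocated (hence skb is initialised to NULL above). The function's tail is outside this hunk; presumably its error paths look roughly like this:

err_free:
        kfree_skb(skb); /* undo the allocation made before the rmem check */
err:
        return -ENOMEM;
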
@@ -4633,7 +4634,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
                 if (eaten <= 0) {
 queue_and_out:
                         if (eaten < 0 &&
-                            tcp_try_rmem_schedule(sk, skb->truesize))
+                            tcp_try_rmem_schedule(sk, skb, skb->truesize))
                                 goto drop;

                         eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 7f91e5ac8277..42b2a6a73092 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2633,7 +2633,7 @@ struct proto tcp_prot = {
         .compat_setsockopt = compat_tcp_setsockopt,
         .compat_getsockopt = compat_tcp_getsockopt,
 #endif
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_MEMCG_KMEM
         .init_cgroup = tcp_init_cgroup,
         .destroy_cgroup = tcp_destroy_cgroup,
         .proto_cgroup = tcp_proto_cgroup,
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 33cd065cfbd8..3f1bcff0b10b 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2045,7 +2045,8 @@ void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
         if (unlikely(sk->sk_state == TCP_CLOSE))
                 return;

-        if (tcp_write_xmit(sk, cur_mss, nonagle, 0, GFP_ATOMIC))
+        if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
+                           sk_gfp_atomic(sk, GFP_ATOMIC)))
                 tcp_check_probe_timer(sk);
 }

@@ -2666,7 +2667,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,

         if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
                 s_data_desired = cvp->s_data_desired;
-        skb = alloc_skb(MAX_TCP_HEADER + 15 + s_data_desired, GFP_ATOMIC);
+        skb = alloc_skb(MAX_TCP_HEADER + 15 + s_data_desired,
+                        sk_gfp_atomic(sk, GFP_ATOMIC));
         if (unlikely(!skb)) {
                 dst_release(dst);
                 return NULL;
@@ -3064,7 +3066,7 @@ void tcp_send_ack(struct sock *sk)
          * tcp_transmit_skb() will set the ownership to this
          * sock.
          */
-        buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
+        buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
         if (buff == NULL) {
                 inet_csk_schedule_ack(sk);
                 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
@@ -3079,7 +3081,7 @@ void tcp_send_ack(struct sock *sk)

         /* Send it off, this clears delayed acks for us. */
         TCP_SKB_CB(buff)->when = tcp_time_stamp;
-        tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
+        tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
 }

 /* This routine sends a packet with an out of date sequence
@@ -3099,7 +3101,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
         struct sk_buff *skb;

         /* We don't queue it, tcp_transmit_skb() sets ownership. */
-        skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
+        skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
         if (skb == NULL)
                 return -1;

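
sk_gfp_atomic() is defined outside net/ipv4 (in include/net/sock.h) and therefore does not appear in this diffstat. Conceptually it is GFP_ATOMIC plus any per-socket allocation flags, so that sockets marked for emergency memory (e.g. swap over a network block device) may dip into the pfmemalloc reserves when allocating ACKs, probe packets and SYN-ACKs. A rough sketch, treating the exact definition as an assumption:

/* GFP_ATOMIC, widened by __GFP_MEMALLOC when this socket may use the
 * emergency reserves (the flag is carried in sk->sk_allocation).
 */
#define sk_gfp_atomic(sk, gfp_mask) \
        (GFP_ATOMIC | ((sk)->sk_allocation & __GFP_MEMALLOC))

The call sites switched above are exactly the ones that previously passed a bare GFP_ATOMIC on the transmit path.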