Diffstat (limited to 'net')
-rw-r--r--  net/caif/caif_socket.c   2
-rw-r--r--  net/core/sock.c         14
-rw-r--r--  net/ipv4/tcp_input.c    21
-rw-r--r--  net/sctp/ulpevent.c      3
4 files changed, 27 insertions, 13 deletions
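
Note: the common thread in every hunk below is that sk_rmem_schedule() now takes the skb being charged, not just its size. The header-side definition is not part of this diff (the diffstat is limited to 'net'), so the following is only a sketch of the assumed shape of the updated helper: the extra argument lets an skb allocated from memory reserves (pfmemalloc) be accepted on a SOCK_MEMALLOC socket even when ordinary rmem accounting would reject it.

    /* Sketch only: approximate shape of the updated include/net/sock.h helper;
     * not taken from this diff, which does not include header changes. */
    static inline bool
    sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, unsigned int size)
    {
            if (!sk_has_account(sk))        /* socket does no memory accounting */
                    return true;
            return size <= sk->sk_forward_alloc ||
                    __sk_mem_schedule(sk, size, SK_MEM_RECV) ||
                    skb_pfmemalloc(skb);    /* reserve-backed skb: admit it anyway */
    }
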
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 78f1cdad5b33..095259f83902 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -141,7 +141,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	err = sk_filter(sk, skb);
 	if (err)
 		return err;
-	if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
+	if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
 		set_rx_flow_off(cf_sk);
 		net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
 		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
diff --git a/net/core/sock.c b/net/core/sock.c
index 32fdcd2d6e8f..6b654b3ddfda 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -295,6 +295,18 @@ void sk_clear_memalloc(struct sock *sk)
 	sock_reset_flag(sk, SOCK_MEMALLOC);
 	sk->sk_allocation &= ~__GFP_MEMALLOC;
 	static_key_slow_dec(&memalloc_socks);
+
+	/*
+	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
+	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
+	 * it has rmem allocations there is a risk that the user of the
+	 * socket cannot make forward progress due to exceeding the rmem
+	 * limits. By rights, sk_clear_memalloc() should only be called
+	 * on sockets being torn down but warn and reset the accounting if
+	 * that assumption breaks.
+	 */
+	if (WARN_ON(sk->sk_forward_alloc))
+		sk_mem_reclaim(sk);
 }
 EXPORT_SYMBOL_GPL(sk_clear_memalloc);
 
@@ -396,7 +408,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	if (err)
 		return err;
 
-	if (!sk_rmem_schedule(sk, skb->truesize)) {
+	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
 		atomic_inc(&sk->sk_drops);
 		return -ENOBUFS;
 	}
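
The comment added to sk_clear_memalloc() above relies on the flag only being cleared when the socket is going away. A minimal sketch of that pairing, with hypothetical example_sock_open()/example_sock_release() callbacks standing in for a swap-over-network user (the names are illustrative, not from this patch):

    /* Hypothetical caller, for illustration only. */
    static void example_sock_open(struct sock *sk)
    {
            sk_set_memalloc(sk);    /* allow this socket to dip into reserves */
    }

    static void example_sock_release(struct sock *sk)
    {
            /* Only on teardown: any leftover sk_forward_alloc at this point
             * trips the new WARN_ON() and is reclaimed. */
            sk_clear_memalloc(sk);
    }
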
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a356e1fecf9a..00b91b4b8665 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4351,19 +4351,20 @@ static void tcp_ofo_queue(struct sock *sk)
 static bool tcp_prune_ofo_queue(struct sock *sk);
 static int tcp_prune_queue(struct sock *sk);
 
-static int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
+static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
+				 unsigned int size)
 {
 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-	    !sk_rmem_schedule(sk, size)) {
+	    !sk_rmem_schedule(sk, skb, size)) {
 
 		if (tcp_prune_queue(sk) < 0)
 			return -1;
 
-		if (!sk_rmem_schedule(sk, size)) {
+		if (!sk_rmem_schedule(sk, skb, size)) {
 			if (!tcp_prune_ofo_queue(sk))
 				return -1;
 
-			if (!sk_rmem_schedule(sk, size))
+			if (!sk_rmem_schedule(sk, skb, size))
 				return -1;
 		}
 	}
@@ -4418,7 +4419,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 
 	TCP_ECN_check_ce(tp, skb);
 
-	if (unlikely(tcp_try_rmem_schedule(sk, skb->truesize))) {
+	if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
 		__kfree_skb(skb);
 		return;
@@ -4552,17 +4553,17 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int
 
 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
 {
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
 	struct tcphdr *th;
 	bool fragstolen;
 
-	if (tcp_try_rmem_schedule(sk, size + sizeof(*th)))
-		goto err;
-
 	skb = alloc_skb(size + sizeof(*th), sk->sk_allocation);
 	if (!skb)
 		goto err;
 
+	if (tcp_try_rmem_schedule(sk, skb, size + sizeof(*th)))
+		goto err_free;
+
 	th = (struct tcphdr *)skb_put(skb, sizeof(*th));
 	skb_reset_transport_header(skb);
 	memset(th, 0, sizeof(*th));
@@ -4633,7 +4634,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 	if (eaten <= 0) {
 queue_and_out:
 		if (eaten < 0 &&
-		    tcp_try_rmem_schedule(sk, skb->truesize))
+		    tcp_try_rmem_schedule(sk, skb, skb->truesize))
 			goto drop;
 
 		eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 33d894776192..10c018a5b9fe 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -702,7 +702,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
 	if (rx_count >= asoc->base.sk->sk_rcvbuf) {
 
 		if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) ||
-		    (!sk_rmem_schedule(asoc->base.sk, chunk->skb->truesize)))
+		    (!sk_rmem_schedule(asoc->base.sk, chunk->skb,
+				       chunk->skb->truesize)))
 			goto fail;
 	}
 