author     Linus Torvalds <torvalds@linux-foundation.org>  2012-07-31 22:25:39 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-07-31 22:25:39 -0400
commit     ac694dbdbc403c00e2c14d10bc7b8412cc378259
tree       e37328cfbeaf43716dd5914cad9179e57e84df76 /net/ipv4/tcp_input.c
parent     a40a1d3d0a2fd613fdec6d89d3c053268ced76ed
parent     437ea90cc3afdca5229b41c6b1d38c4842756cb9
Merge branch 'akpm' (Andrew's patch-bomb)
Merge Andrew's second set of patches:
- MM
- a few random fixes
- a couple of RTC leftovers
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (120 commits)
rtc/rtc-88pm80x: remove unneeded devm_kfree
rtc/rtc-88pm80x: assign ret only when rtc_register_driver fails
mm: hugetlbfs: close race during teardown of hugetlbfs shared page tables
tmpfs: distribute interleave better across nodes
mm: remove redundant initialization
mm: warn if pg_data_t isn't initialized with zero
mips: zero out pg_data_t when it's allocated
memcg: fix memory accounting scalability in shrink_page_list
mm/sparse: remove index_init_lock
mm/sparse: more checks on mem_section number
mm/sparse: optimize sparse_index_alloc
memcg: add mem_cgroup_from_css() helper
memcg: further prevent OOM with too many dirty pages
memcg: prevent OOM with too many dirty pages
mm: mmu_notifier: fix freed page still mapped in secondary MMU
mm: memcg: only check anon swapin page charges for swap cache
mm: memcg: only check swap cache pages for repeated charging
mm: memcg: split swapin charge function into private and public part
mm: memcg: remove needless !mm fixup to init_mm when charging
mm: memcg: remove unneeded shmem charge type
...
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c | 21 +++++++++++----------
1 file changed, 11 insertions(+), 10 deletions(-)
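Every hunk below follows from one signature change: tcp_try_rmem_schedule(), and the sk_rmem_schedule() helper it calls, now receives the sk_buff being charged rather than only a byte count, presumably so the accounting layer can inspect the buffer itself. Read directly off the diff, the prototypes before and after are:

/* before: the check only sees a size */
static int tcp_try_rmem_schedule(struct sock *sk, unsigned int size);

/* after: the skb being charged is passed through to sk_rmem_schedule() */
static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
				 unsigned int size);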
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9be30b039ae3..2fd2bc9e3c64 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4351,19 +4351,20 @@ static void tcp_ofo_queue(struct sock *sk)
 static bool tcp_prune_ofo_queue(struct sock *sk);
 static int tcp_prune_queue(struct sock *sk);
 
-static int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
+static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
+				 unsigned int size)
 {
 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-	    !sk_rmem_schedule(sk, size)) {
+	    !sk_rmem_schedule(sk, skb, size)) {
 
 		if (tcp_prune_queue(sk) < 0)
 			return -1;
 
-		if (!sk_rmem_schedule(sk, size)) {
+		if (!sk_rmem_schedule(sk, skb, size)) {
 			if (!tcp_prune_ofo_queue(sk))
 				return -1;
 
-			if (!sk_rmem_schedule(sk, size))
+			if (!sk_rmem_schedule(sk, skb, size))
 				return -1;
 		}
 	}
@@ -4418,7 +4419,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 
 	TCP_ECN_check_ce(tp, skb);
 
-	if (unlikely(tcp_try_rmem_schedule(sk, skb->truesize))) {
+	if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
 		__kfree_skb(skb);
 		return;
@@ -4552,17 +4553,17 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int
 
 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
 {
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
 	struct tcphdr *th;
 	bool fragstolen;
 
-	if (tcp_try_rmem_schedule(sk, size + sizeof(*th)))
-		goto err;
-
 	skb = alloc_skb(size + sizeof(*th), sk->sk_allocation);
 	if (!skb)
 		goto err;
 
+	if (tcp_try_rmem_schedule(sk, skb, size + sizeof(*th)))
+		goto err_free;
+
 	th = (struct tcphdr *)skb_put(skb, sizeof(*th));
 	skb_reset_transport_header(skb);
 	memset(th, 0, sizeof(*th));
@@ -4633,7 +4634,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 	if (eaten <= 0) {
 queue_and_out:
 		if (eaten < 0 &&
-		    tcp_try_rmem_schedule(sk, skb->truesize))
+		    tcp_try_rmem_schedule(sk, skb, skb->truesize))
 			goto drop;
 
 		eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
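Of the four hunks, only the tcp_send_rcvq() one changes control flow: since the check now needs the skb, alloc_skb() must run before tcp_try_rmem_schedule(), and a failed check has to release the freshly allocated buffer via the new err_free label instead of returning before any allocation. The sketch below models that post-patch ordering in plain user-space C; the toy_* types, the admission rule, and the immediate consumption of the buffer are illustrative assumptions, not kernel code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-ins for the kernel structures involved in this diff. */
struct toy_skb {
	size_t truesize;
	char *data;
};

struct toy_sock {
	size_t rmem_alloc;	/* bytes currently charged to the socket */
	size_t rcvbuf;		/* receive buffer limit */
};

/* Simplified admission rule; the real sk_rmem_schedule() also consults
 * protocol-wide memory pressure, and after this patch can inspect the
 * skb itself (hence the new parameter). */
static int toy_rmem_schedule(struct toy_sock *sk, struct toy_skb *skb,
			     size_t size)
{
	(void)skb;	/* available post-patch; unused by this toy rule */
	return sk->rmem_alloc + size <= sk->rcvbuf;
}

static struct toy_skb *toy_alloc_skb(size_t size)
{
	struct toy_skb *skb = calloc(1, sizeof(*skb));

	if (!skb)
		return NULL;
	skb->data = calloc(1, size);
	if (!skb->data) {
		free(skb);
		return NULL;
	}
	skb->truesize = size;
	return skb;
}

static void toy_free_skb(struct toy_skb *skb)
{
	free(skb->data);
	free(skb);
}

/* Mirrors the post-patch tcp_send_rcvq() ordering: allocate first, then
 * schedule receive memory, and unwind via err_free when the check fails. */
static int toy_send_rcvq(struct toy_sock *sk, const char *msg, size_t size)
{
	struct toy_skb *skb = NULL;

	skb = toy_alloc_skb(size);
	if (!skb)
		goto err;

	if (!toy_rmem_schedule(sk, skb, size))
		goto err_free;

	memcpy(skb->data, msg, size);
	sk->rmem_alloc += skb->truesize;
	printf("queued %zu bytes (rmem_alloc now %zu)\n", size, sk->rmem_alloc);
	toy_free_skb(skb);	/* toy: free the buffer but leave it charged,
				 * as if it sat on the receive queue */
	return 0;

err_free:
	toy_free_skb(skb);
err:
	printf("rejected %zu bytes (rmem_alloc stays %zu)\n", size, sk->rmem_alloc);
	return -1;
}

int main(void)
{
	struct toy_sock sk = { .rmem_alloc = 0, .rcvbuf = 64 };

	if (toy_send_rcvq(&sk, "hello", 5))	/* fits: accepted and charged */
		return 1;
	if (!toy_send_rcvq(&sk, "way too big", 100))	/* over rcvbuf: rejected */
		return 1;
	return 0;
}

Built with any C compiler (e.g. cc -Wall toy.c), the first call is admitted and charged while the oversized second call takes the err_free path, exercising both outcomes of the reordered check.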