diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-07 20:39:25 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-07 20:39:25 -0400 |
commit | d0cd84817c745655428dbfdb1e3f754230b46bef (patch) | |
tree | a7b6f422f6ac50f506ffa7a66f8e83387f90f212 /net/ipv4/tcp_input.c | |
parent | bdf428feb225229b1d4715b45bbdad4a934cd89c (diff) | |
parent | 3f334078567245429540e6461c81c749fce87f70 (diff) |
Merge tag 'dmaengine-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine
Pull dmaengine updates from Dan Williams:
"Even though this has fixes marked for -stable, given the size and the
needed conflict resolutions this is 3.18-rc1/merge-window material.
These patches have been languishing in my tree for a long while. The
fact that I do not have the time to do proper/prompt maintenance of
this tree is a primary factor in the decision to step down as
dmaengine maintainer. That and the fact that the bulk of drivers/dma/
activity is going through Vinod these days.
The net_dma removal has not been in -next. It has developed simple
conflicts against mainline and net-next (for-3.18).
Continuing thanks to Vinod for staying on top of drivers/dma/.
Summary:
1/ Step down as dmaengine maintainer see commit 08223d80df38
"dmaengine maintainer update"
2/ Removal of net_dma, as it has been marked 'broken' since 3.13
(commit 77873803363c "net_dma: mark broken"), without reports of
performance regression.
3/ Miscellaneous fixes"
* tag 'dmaengine-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine:
net: make tcp_cleanup_rbuf private
net_dma: revert 'copied_early'
net_dma: simple removal
dmaengine maintainer update
dmatest: prevent memory leakage on error path in thread
ioat: Use time_before_jiffies()
dmaengine: fix xor sources continuation
dma: mv_xor: Rename __mv_xor_slot_cleanup() to mv_xor_slot_cleanup()
dma: mv_xor: Remove all callers of mv_xor_slot_cleanup()
dma: mv_xor: Remove unneeded mv_xor_clean_completed_slots() call
ioat: Use pci_enable_msix_exact() instead of pci_enable_msix()
drivers: dma: Include appropriate header file in dca.c
drivers: dma: Mark functions as static in dma_v3.c
dma: mv_xor: Add DMA API error checks
ioat/dca: Use dev_is_pci() to check whether it is pci device
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r-- | net/ipv4/tcp_input.c | 83 |
1 file changed, 8 insertions, 75 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index a906e0200ff2..0185eea59342 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -73,7 +73,6 @@ | |||
73 | #include <net/inet_common.h> | 73 | #include <net/inet_common.h> |
74 | #include <linux/ipsec.h> | 74 | #include <linux/ipsec.h> |
75 | #include <asm/unaligned.h> | 75 | #include <asm/unaligned.h> |
76 | #include <net/netdma.h> | ||
77 | #include <linux/errqueue.h> | 76 | #include <linux/errqueue.h> |
78 | 77 | ||
79 | int sysctl_tcp_timestamps __read_mostly = 1; | 78 | int sysctl_tcp_timestamps __read_mostly = 1; |
@@ -4951,53 +4950,6 @@ static inline bool tcp_checksum_complete_user(struct sock *sk, | |||
4951 | __tcp_checksum_complete_user(sk, skb); | 4950 | __tcp_checksum_complete_user(sk, skb); |
4952 | } | 4951 | } |
4953 | 4952 | ||
4954 | #ifdef CONFIG_NET_DMA | ||
4955 | static bool tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, | ||
4956 | int hlen) | ||
4957 | { | ||
4958 | struct tcp_sock *tp = tcp_sk(sk); | ||
4959 | int chunk = skb->len - hlen; | ||
4960 | int dma_cookie; | ||
4961 | bool copied_early = false; | ||
4962 | |||
4963 | if (tp->ucopy.wakeup) | ||
4964 | return false; | ||
4965 | |||
4966 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) | ||
4967 | tp->ucopy.dma_chan = net_dma_find_channel(); | ||
4968 | |||
4969 | if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) { | ||
4970 | |||
4971 | dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan, | ||
4972 | skb, hlen, | ||
4973 | tp->ucopy.iov, chunk, | ||
4974 | tp->ucopy.pinned_list); | ||
4975 | |||
4976 | if (dma_cookie < 0) | ||
4977 | goto out; | ||
4978 | |||
4979 | tp->ucopy.dma_cookie = dma_cookie; | ||
4980 | copied_early = true; | ||
4981 | |||
4982 | tp->ucopy.len -= chunk; | ||
4983 | tp->copied_seq += chunk; | ||
4984 | tcp_rcv_space_adjust(sk); | ||
4985 | |||
4986 | if ((tp->ucopy.len == 0) || | ||
4987 | (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) || | ||
4988 | (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) { | ||
4989 | tp->ucopy.wakeup = 1; | ||
4990 | sk->sk_data_ready(sk); | ||
4991 | } | ||
4992 | } else if (chunk > 0) { | ||
4993 | tp->ucopy.wakeup = 1; | ||
4994 | sk->sk_data_ready(sk); | ||
4995 | } | ||
4996 | out: | ||
4997 | return copied_early; | ||
4998 | } | ||
4999 | #endif /* CONFIG_NET_DMA */ | ||
5000 | |||
5001 | /* Does PAWS and seqno based validation of an incoming segment, flags will | 4953 | /* Does PAWS and seqno based validation of an incoming segment, flags will |
5002 | * play significant role here. | 4954 | * play significant role here. |
5003 | */ | 4955 | */ |
@@ -5177,27 +5129,15 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
5177 | } | 5129 | } |
5178 | } else { | 5130 | } else { |
5179 | int eaten = 0; | 5131 | int eaten = 0; |
5180 | int copied_early = 0; | ||
5181 | bool fragstolen = false; | 5132 | bool fragstolen = false; |
5182 | 5133 | ||
5183 | if (tp->copied_seq == tp->rcv_nxt && | 5134 | if (tp->ucopy.task == current && |
5184 | len - tcp_header_len <= tp->ucopy.len) { | 5135 | tp->copied_seq == tp->rcv_nxt && |
5185 | #ifdef CONFIG_NET_DMA | 5136 | len - tcp_header_len <= tp->ucopy.len && |
5186 | if (tp->ucopy.task == current && | 5137 | sock_owned_by_user(sk)) { |
5187 | sock_owned_by_user(sk) && | 5138 | __set_current_state(TASK_RUNNING); |
5188 | tcp_dma_try_early_copy(sk, skb, tcp_header_len)) { | ||
5189 | copied_early = 1; | ||
5190 | eaten = 1; | ||
5191 | } | ||
5192 | #endif | ||
5193 | if (tp->ucopy.task == current && | ||
5194 | sock_owned_by_user(sk) && !copied_early) { | ||
5195 | __set_current_state(TASK_RUNNING); | ||
5196 | 5139 | ||
5197 | if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) | 5140 | if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) { |
5198 | eaten = 1; | ||
5199 | } | ||
5200 | if (eaten) { | ||
5201 | /* Predicted packet is in window by definition. | 5141 | /* Predicted packet is in window by definition. |
5202 | * seq == rcv_nxt and rcv_wup <= rcv_nxt. | 5142 | * seq == rcv_nxt and rcv_wup <= rcv_nxt. |
5203 | * Hence, check seq<=rcv_wup reduces to: | 5143 | * Hence, check seq<=rcv_wup reduces to: |
@@ -5213,9 +5153,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
5213 | __skb_pull(skb, tcp_header_len); | 5153 | __skb_pull(skb, tcp_header_len); |
5214 | tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; | 5154 | tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; |
5215 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER); | 5155 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER); |
5156 | eaten = 1; | ||
5216 | } | 5157 | } |
5217 | if (copied_early) | ||
5218 | tcp_cleanup_rbuf(sk, skb->len); | ||
5219 | } | 5158 | } |
5220 | if (!eaten) { | 5159 | if (!eaten) { |
5221 | if (tcp_checksum_complete_user(sk, skb)) | 5160 | if (tcp_checksum_complete_user(sk, skb)) |
@@ -5252,14 +5191,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
5252 | goto no_ack; | 5191 | goto no_ack; |
5253 | } | 5192 | } |
5254 | 5193 | ||
5255 | if (!copied_early || tp->rcv_nxt != tp->rcv_wup) | 5194 | __tcp_ack_snd_check(sk, 0); |
5256 | __tcp_ack_snd_check(sk, 0); | ||
5257 | no_ack: | 5195 | no_ack: |
5258 | #ifdef CONFIG_NET_DMA | ||
5259 | if (copied_early) | ||
5260 | __skb_queue_tail(&sk->sk_async_wait_queue, skb); | ||
5261 | else | ||
5262 | #endif | ||
5263 | if (eaten) | 5196 | if (eaten) |
5264 | kfree_skb_partial(skb, fragstolen); | 5197 | kfree_skb_partial(skb, fragstolen); |
5265 | sk->sk_data_ready(sk); | 5198 | sk->sk_data_ready(sk); |