author    David S. Miller <davem@davemloft.net>  2018-07-24 22:21:58 -0400
committer David S. Miller <davem@davemloft.net>  2018-07-24 22:21:58 -0400
commit    19725496da5602b401eae389736ab00d1817e264 (patch)
tree      4c1a94bf0121769110f1b9c08ee337a55679a48a /net/ipv4
parent    aea5f654e6b78a0c976f7a25950155932c77a53f (diff)
parent    9981b4fb8684883dcc0daf088891ff32260b9794 (diff)

Merge ra.kernel.org:/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/igmp.c          3
-rw-r--r--  net/ipv4/ip_output.c     2
-rw-r--r--  net/ipv4/ip_sockglue.c   7
-rw-r--r--  net/ipv4/tcp_dctcp.c    52
-rw-r--r--  net/ipv4/tcp_input.c    65
-rw-r--r--  net/ipv4/tcp_output.c   32
6 files changed, 100 insertions, 61 deletions
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 598333b123b9..bae9096821be 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1200,8 +1200,7 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
 	spin_lock_bh(&im->lock);
 	if (pmc) {
 		im->interface = pmc->interface;
-		im->sfmode = pmc->sfmode;
-		if (pmc->sfmode == MCAST_INCLUDE) {
+		if (im->sfmode == MCAST_INCLUDE) {
 			im->tomb = pmc->tomb;
 			im->sources = pmc->sources;
 			for (psf = im->sources; psf; psf = psf->sf_next)
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index e2b6bd478afb..9c4e72e9c60a 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -524,6 +524,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 	to->dev = from->dev;
 	to->mark = from->mark;
 
+	skb_copy_hash(to, from);
+
 	/* Copy the flags to each fragment. */
 	IPCB(to)->flags = IPCB(from)->flags;
 
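The ip_output.c change copies the parent packet's flow hash onto every fragment, so hash-based steering (RSS/RPS, multiqueue transmit) keeps all fragments of one datagram together. A minimal user-space sketch of the same idea follows; the struct and function names are illustrative only, not kernel APIs.

#include <stdint.h>
#include <stddef.h>

/* Hypothetical sketch of the idea behind skb_copy_hash() in
 * ip_copy_metadata(): every fragment inherits the parent's flow hash, so a
 * hash-based steering decision (queue = hash % nqueues) lands all fragments
 * of one datagram on the same queue.
 */
struct frag {
	uint32_t flow_hash;   /* copied from the parent packet */
	size_t   len;
};

static void copy_frag_metadata(struct frag *to, const struct frag *from)
{
	to->flow_hash = from->flow_hash;	/* analogue of skb_copy_hash() */
}

static unsigned int pick_tx_queue(const struct frag *f, unsigned int nqueues)
{
	return f->flow_hash % nqueues;		/* same hash => same queue */
}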
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 64c76dcf7386..c0fe5ad996f2 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -150,15 +150,18 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
 {
 	struct sockaddr_in sin;
 	const struct iphdr *iph = ip_hdr(skb);
-	__be16 *ports = (__be16 *)skb_transport_header(skb);
+	__be16 *ports;
+	int end;
 
-	if (skb_transport_offset(skb) + 4 > (int)skb->len)
+	end = skb_transport_offset(skb) + 4;
+	if (end > 0 && !pskb_may_pull(skb, end))
 		return;
 
 	/* All current transport protocols have the port numbers in the
 	 * first four bytes of the transport header and this function is
 	 * written with this assumption in mind.
 	 */
+	ports = (__be16 *)skb_transport_header(skb);
 
 	sin.sin_family = AF_INET;
 	sin.sin_addr.s_addr = iph->daddr;
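The ip_sockglue.c change replaces a length comparison with pskb_may_pull(): checking skb_transport_offset() + 4 against skb->len only proves the bytes exist somewhere in the packet, while pskb_may_pull() makes sure the four port bytes are actually in the linear area before a pointer into it is formed. A simplified user-space sketch of that validate-before-dereference pattern, with hypothetical names and without the kernel's pulling of paged data:

#include <stdint.h>
#include <stdbool.h>
#include <string.h>

/* Simplified sketch (not kernel code): make sure 'need' bytes starting at
 * 'off' are really present in the directly addressable buffer before forming
 * a pointer into it, instead of only comparing against the total length.
 */
struct pkt {
	uint8_t *data;        /* linear bytes */
	size_t   linear_len;
	size_t   total_len;   /* may exceed linear_len (paged data) */
};

static bool may_read(const struct pkt *p, size_t off, size_t need)
{
	/* analogue of pskb_may_pull(); here we only bounds-check */
	return off + need <= p->linear_len;
}

static bool read_ports(const struct pkt *p, size_t thoff, uint16_t ports[2])
{
	if (!may_read(p, thoff, 4))
		return false;
	memcpy(ports, p->data + thoff, 4);
	return true;
}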
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index 5869f89ca656..8b637f9f23a2 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -129,24 +129,14 @@ static void dctcp_ce_state_0_to_1(struct sock *sk)
 	struct dctcp *ca = inet_csk_ca(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	/* State has changed from CE=0 to CE=1 and delayed
-	 * ACK has not sent yet.
-	 */
-	if (!ca->ce_state &&
-	    inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {
-		u32 tmp_rcv_nxt;
-
-		/* Save current rcv_nxt. */
-		tmp_rcv_nxt = tp->rcv_nxt;
-
-		/* Generate previous ack with CE=0. */
-		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
-		tp->rcv_nxt = ca->prior_rcv_nxt;
-
-		tcp_send_ack(sk);
-
-		/* Recover current rcv_nxt. */
-		tp->rcv_nxt = tmp_rcv_nxt;
+	if (!ca->ce_state) {
+		/* State has changed from CE=0 to CE=1, force an immediate
+		 * ACK to reflect the new CE state. If an ACK was delayed,
+		 * send that first to reflect the prior CE state.
+		 */
+		if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+			__tcp_send_ack(sk, ca->prior_rcv_nxt);
+		tcp_enter_quickack_mode(sk, 1);
 	}
 
 	ca->prior_rcv_nxt = tp->rcv_nxt;
@@ -160,24 +150,14 @@ static void dctcp_ce_state_1_to_0(struct sock *sk)
 	struct dctcp *ca = inet_csk_ca(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	/* State has changed from CE=1 to CE=0 and delayed
-	 * ACK has not sent yet.
-	 */
-	if (ca->ce_state &&
-	    inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {
-		u32 tmp_rcv_nxt;
-
-		/* Save current rcv_nxt. */
-		tmp_rcv_nxt = tp->rcv_nxt;
-
-		/* Generate previous ack with CE=1. */
-		tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
-		tp->rcv_nxt = ca->prior_rcv_nxt;
-
-		tcp_send_ack(sk);
-
-		/* Recover current rcv_nxt. */
-		tp->rcv_nxt = tmp_rcv_nxt;
+	if (ca->ce_state) {
+		/* State has changed from CE=1 to CE=0, force an immediate
+		 * ACK to reflect the new CE state. If an ACK was delayed,
+		 * send that first to reflect the prior CE state.
+		 */
+		if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+			__tcp_send_ack(sk, ca->prior_rcv_nxt);
+		tcp_enter_quickack_mode(sk, 1);
 	}
 
 	ca->prior_rcv_nxt = tp->rcv_nxt;
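Previously the DCTCP handlers faked the "old state" ACK by temporarily rewriting tp->rcv_nxt and toggling TCP_ECN_DEMAND_CWR around tcp_send_ack(), and only when a delayed ACK happened to be pending. The rewritten handlers instead ask the stack for an ACK carrying an explicit acknowledgment number (__tcp_send_ack(sk, ca->prior_rcv_nxt)) and then force immediate ACKs so the CE change reaches the sender quickly. The sketch below condenses that control flow into one user-space function; it is an illustration of the hunks above, not a drop-in replacement, and send_ack_for()/enter_quickack() are stand-ins for __tcp_send_ack() and tcp_enter_quickack_mode().

#include <stdint.h>
#include <stdbool.h>

/* User-space sketch (illustrative only) of the new CE-transition logic. */
struct ce_tracker {
	bool     ce_state;        /* last CE value reflected to the peer */
	bool     delayed_ack_pending;
	uint32_t prior_rcv_nxt;   /* rcv_nxt snapshot from the last call */
	uint32_t rcv_nxt;
};

static void send_ack_for(uint32_t ack_seq)
{
	(void)ack_seq;            /* build and send a pure ACK for ack_seq */
}

static void enter_quickack(void)
{
	/* disable ACK delaying for the next segments */
}

static void ce_transition(struct ce_tracker *t, bool now_ce)
{
	if (t->ce_state != now_ce) {
		if (t->delayed_ack_pending)
			send_ack_for(t->prior_rcv_nxt);  /* old CE state */
		enter_quickack();                        /* new CE state */
	}
	t->prior_rcv_nxt = t->rcv_nxt;
}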
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 91dbb9afb950..d51fa358b2b1 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -216,7 +216,7 @@ static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
 	icsk->icsk_ack.quick = quickacks;
 }
 
-static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
+void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 
@@ -224,6 +224,7 @@ static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
 	icsk->icsk_ack.pingpong = 0;
 	icsk->icsk_ack.ato = TCP_ATO_MIN;
 }
+EXPORT_SYMBOL(tcp_enter_quickack_mode);
 
 /* Send ACKs quickly, if "quick" count is not exhausted
  * and the session is not interactive.
@@ -4366,6 +4367,23 @@ static bool tcp_try_coalesce(struct sock *sk,
 	return true;
 }
 
+static bool tcp_ooo_try_coalesce(struct sock *sk,
+			     struct sk_buff *to,
+			     struct sk_buff *from,
+			     bool *fragstolen)
+{
+	bool res = tcp_try_coalesce(sk, to, from, fragstolen);
+
+	/* In case tcp_drop() is called later, update to->gso_segs */
+	if (res) {
+		u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
+			       max_t(u16, 1, skb_shinfo(from)->gso_segs);
+
+		skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
+	}
+	return res;
+}
+
 static void tcp_drop(struct sock *sk, struct sk_buff *skb)
 {
 	sk_drops_add(sk, skb);
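tcp_ooo_try_coalesce() folds one out-of-order skb into another; if the merged buffer is later freed through tcp_drop(), the drop counters should still reflect every original segment, so the helper sums the two gso_segs values (treating 0 as 1) and saturates at the 16-bit maximum. A stand-alone sketch of that accumulation, assuming nothing beyond plain C:

#include <stdint.h>
#include <stdio.h>

/* Illustration of the gso_segs bookkeeping above: each buffer counts as at
 * least one segment, and the running total saturates at the u16 maximum
 * instead of wrapping, so later drop accounting stays meaningful.
 */
static uint16_t accumulate_segs(uint16_t to_segs, uint16_t from_segs)
{
	uint32_t sum = (uint32_t)(to_segs ? to_segs : 1) +
		       (uint32_t)(from_segs ? from_segs : 1);

	return sum > 0xFFFF ? 0xFFFF : (uint16_t)sum;
}

int main(void)
{
	printf("%u\n", accumulate_segs(0, 0));         /* 2                 */
	printf("%u\n", accumulate_segs(40000, 30000)); /* 65535 (saturated) */
	return 0;
}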
@@ -4489,8 +4507,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 	/* In the typical case, we are adding an skb to the end of the list.
 	 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
 	 */
-	if (tcp_try_coalesce(sk, tp->ooo_last_skb,
-			     skb, &fragstolen)) {
+	if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
+				 skb, &fragstolen)) {
 coalesce_done:
 		tcp_grow_window(sk, skb);
 		kfree_skb_partial(skb, fragstolen);
@@ -4518,7 +4536,7 @@ coalesce_done:
 				/* All the bits are present. Drop. */
 				NET_INC_STATS(sock_net(sk),
 					      LINUX_MIB_TCPOFOMERGE);
-				__kfree_skb(skb);
+				tcp_drop(sk, skb);
 				skb = NULL;
 				tcp_dsack_set(sk, seq, end_seq);
 				goto add_sack;
@@ -4537,11 +4555,11 @@ coalesce_done:
 						 TCP_SKB_CB(skb1)->end_seq);
 				NET_INC_STATS(sock_net(sk),
 					      LINUX_MIB_TCPOFOMERGE);
-				__kfree_skb(skb1);
+				tcp_drop(sk, skb1);
 				goto merge_right;
 			}
-		} else if (tcp_try_coalesce(sk, skb1,
-					    skb, &fragstolen)) {
+		} else if (tcp_ooo_try_coalesce(sk, skb1,
+						skb, &fragstolen)) {
 			goto coalesce_done;
 		}
 		p = &parent->rb_right;
@@ -4924,6 +4942,7 @@ end:
 static void tcp_collapse_ofo_queue(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	u32 range_truesize, sum_tiny = 0;
 	struct sk_buff *skb, *head;
 	u32 start, end;
 
@@ -4935,6 +4954,7 @@ new_range:
 	}
 	start = TCP_SKB_CB(skb)->seq;
 	end = TCP_SKB_CB(skb)->end_seq;
+	range_truesize = skb->truesize;
 
 	for (head = skb;;) {
 		skb = skb_rb_next(skb);
@@ -4945,11 +4965,20 @@ new_range:
 		if (!skb ||
 		    after(TCP_SKB_CB(skb)->seq, end) ||
 		    before(TCP_SKB_CB(skb)->end_seq, start)) {
-			tcp_collapse(sk, NULL, &tp->out_of_order_queue,
-				     head, skb, start, end);
+			/* Do not attempt collapsing tiny skbs */
+			if (range_truesize != head->truesize ||
+			    end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) {
+				tcp_collapse(sk, NULL, &tp->out_of_order_queue,
+					     head, skb, start, end);
+			} else {
+				sum_tiny += range_truesize;
+				if (sum_tiny > sk->sk_rcvbuf >> 3)
+					return;
+			}
 			goto new_range;
 		}
 
+		range_truesize += skb->truesize;
 		if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
 			start = TCP_SKB_CB(skb)->seq;
 		if (after(TCP_SKB_CB(skb)->end_seq, end))
@@ -4964,6 +4993,7 @@ new_range:
  * 2) not add too big latencies if thousands of packets sit there.
  *    (But if application shrinks SO_RCVBUF, we could still end up
  *    freeing whole queue here)
+ * 3) Drop at least 12.5 % of sk_rcvbuf to avoid malicious attacks.
  *
  * Return true if queue has shrunk.
  */
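The collapse path now skips ranges that consist of a single tiny skb, since collapsing them costs CPU while recovering little memory, but it bounds the skipped truesize at one eighth of the receive buffer so pruning still makes progress; the right shift by 3 is the 12.5 % mentioned in the comment above. A quick arithmetic sketch; the sk_rcvbuf value is an assumption for illustration, not taken from this diff:

#include <stdio.h>

/* Worked example of the budget introduced above. The 6291456-byte value is
 * just an assumed sk_rcvbuf, not something stated in this diff.
 */
int main(void)
{
	int sk_rcvbuf = 6291456;           /* assumed receive buffer size */
	int budget    = sk_rcvbuf >> 3;    /* >> 3 == /8 == 12.5% */

	printf("collapse/prune budget: %d bytes (12.5%% of %d)\n",
	       budget, sk_rcvbuf);
	return 0;
}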
@@ -4971,20 +5001,26 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct rb_node *node, *prev;
+	int goal;
 
 	if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
 		return false;
 
 	NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
+	goal = sk->sk_rcvbuf >> 3;
 	node = &tp->ooo_last_skb->rbnode;
 	do {
 		prev = rb_prev(node);
 		rb_erase(node, &tp->out_of_order_queue);
+		goal -= rb_to_skb(node)->truesize;
 		tcp_drop(sk, rb_to_skb(node));
-		sk_mem_reclaim(sk);
-		if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
-		    !tcp_under_memory_pressure(sk))
-			break;
+		if (!prev || goal <= 0) {
+			sk_mem_reclaim(sk);
+			if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+			    !tcp_under_memory_pressure(sk))
+				break;
+			goal = sk->sk_rcvbuf >> 3;
+		}
 		node = prev;
 	} while (node);
 	tp->ooo_last_skb = rb_to_skb(prev);
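tcp_prune_ofo_queue() used to reclaim and re-check after every freed skb, which a peer flooding tiny out-of-order segments could turn into many expensive passes; it now frees from the tail until at least sk_rcvbuf >> 3 bytes of truesize are gone before reclaiming and re-checking. A minimal plain-C sketch of that batched loop, with hypothetical types standing in for the rbtree and socket accounting:

#include <stdlib.h>

/* Minimal sketch of the batched pruning loop above: walk a list from the
 * tail, free entries, and only run the expensive reclaim/recheck step once
 * at least 'goal' bytes have been released.
 */
struct entry {
	struct entry *prev;
	int truesize;
};

static void expensive_reclaim(void) { /* stand-in for sk_mem_reclaim() */ }
static int  under_limit(void)       { return 0; /* stand-in for the rmem check */ }

static void prune_from_tail(struct entry *tail, int rcvbuf)
{
	int goal = rcvbuf >> 3;              /* free at least 12.5% per batch */
	struct entry *node = tail, *prev;

	while (node) {
		prev = node->prev;
		goal -= node->truesize;
		free(node);
		if (!prev || goal <= 0) {    /* batch boundary */
			expensive_reclaim();
			if (under_limit())
				break;
			goal = rcvbuf >> 3;  /* start a new batch */
		}
		node = prev;
	}
}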
@@ -5019,6 +5055,9 @@ static int tcp_prune_queue(struct sock *sk)
 	else if (tcp_under_memory_pressure(sk))
 		tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
+	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
+		return 0;
+
 	tcp_collapse_ofo_queue(sk);
 	if (!skb_queue_empty(&sk->sk_receive_queue))
 		tcp_collapse(sk, &sk->sk_receive_queue, NULL,
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 6cbab56e7407..490df62f26d4 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -160,7 +160,8 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
 }
 
 /* Account for an ACK we sent. */
-static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
+static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
+				      u32 rcv_nxt)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -171,6 +172,9 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
 		if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
 			__sock_put(sk);
 	}
+
+	if (unlikely(rcv_nxt != tp->rcv_nxt))
+		return;  /* Special ACK sent by DCTCP to reflect ECN */
 	tcp_dec_quickack_mode(sk, pkts);
 	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 }
@@ -1009,8 +1013,8 @@ static void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb)
  * We are working here with either a clone of the original
  * SKB, or a fresh unique copy made by the retransmit engine.
  */
-static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
-			    gfp_t gfp_mask)
+static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
+			      int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct inet_sock *inet;
@@ -1086,7 +1090,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	th->source = inet->inet_sport;
 	th->dest = inet->inet_dport;
 	th->seq = htonl(tcb->seq);
-	th->ack_seq = htonl(tp->rcv_nxt);
+	th->ack_seq = htonl(rcv_nxt);
 	*(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
 				      tcb->tcp_flags);
 
@@ -1127,7 +1131,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	icsk->icsk_af_ops->send_check(sk, skb);
 
 	if (likely(tcb->tcp_flags & TCPHDR_ACK))
-		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
+		tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
 
 	if (skb->len != tcp_header_size) {
 		tcp_event_data_sent(tp, sk);
@@ -1164,6 +1168,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	return err;
 }
 
+static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+			    gfp_t gfp_mask)
+{
+	return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
+				  tcp_sk(sk)->rcv_nxt);
+}
+
 /* This routine just queues the buffer for sending.
  *
  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
@@ -3557,7 +3568,7 @@ void tcp_send_delayed_ack(struct sock *sk)
 }
 
 /* This routine sends an ack and also updates the window. */
-void tcp_send_ack(struct sock *sk)
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
 {
 	struct sk_buff *buff;
 
@@ -3590,9 +3601,14 @@ void tcp_send_ack(struct sock *sk)
 	skb_set_tcp_pure_ack(buff);
 
 	/* Send it off, this clears delayed acks for us. */
-	tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0);
+	__tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
+}
+EXPORT_SYMBOL_GPL(__tcp_send_ack);
+
+void tcp_send_ack(struct sock *sk)
+{
+	__tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
 }
-EXPORT_SYMBOL_GPL(tcp_send_ack);
 
 /* This routine sends a packet with an out of date sequence
  * number. It assumes the other end will try to ack it.
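The tcp_output.c changes thread an explicit rcv_nxt through the ACK path: __tcp_transmit_skb() stamps it into th->ack_seq, tcp_event_ack_sent() skips the delayed-ACK bookkeeping when the value differs from tp->rcv_nxt, and tcp_transmit_skb()/tcp_send_ack() survive as thin wrappers passing tp->rcv_nxt so existing callers behave as before. The sketch below shows the general "extend the signature, keep the old name as a wrapper" pattern with hypothetical names; it is not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Generic illustration of the refactoring pattern used above: the real work
 * moves into a double-underscore variant that takes the extra parameter, and
 * the original function keeps its signature by forwarding a default value.
 */
static uint32_t current_rcv_nxt = 1000;

static void __send_ack(uint32_t ack_seq)
{
	printf("ACK carrying ack_seq=%u\n", ack_seq);
}

static void send_ack(void)
{
	/* old callers are unaffected: they still acknowledge rcv_nxt */
	__send_ack(current_rcv_nxt);
}

int main(void)
{
	send_ack();          /* normal ACK */
	__send_ack(990);     /* "special" ACK for an earlier sequence */
	return 0;
}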