Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--	net/ipv4/tcp.c	| 38 ++++++++++++++++++++++++++++----------
1 file changed, 28 insertions(+), 10 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 9d2118e5fbc7..541f26a67ba2 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -426,6 +426,17 @@ void tcp_init_sock(struct sock *sk)
 }
 EXPORT_SYMBOL(tcp_init_sock);
 
+static void tcp_tx_timestamp(struct sock *sk, struct sk_buff *skb)
+{
+	if (sk->sk_tsflags) {
+		struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+		sock_tx_timestamp(sk, &shinfo->tx_flags);
+		if (shinfo->tx_flags & SKBTX_ANY_TSTAMP)
+			shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
+	}
+}
+
 /*
  * Wait for a TCP event.
  *
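
The new tcp_tx_timestamp() only does work when sk->sk_tsflags is non-zero, which userspace controls through the SO_TIMESTAMPING socket option. A minimal sketch of the corresponding setup, assuming the SOF_TIMESTAMPING_* flag names from linux/net_tstamp.h (error handling abbreviated):

#include <linux/net_tstamp.h>
#include <sys/socket.h>

/* Enable ACK-driven software TX timestamps on a connected TCP socket so
 * that sk->sk_tsflags becomes non-zero and tcp_tx_timestamp() tags
 * outgoing skbs. Sketch only; real code should check the result. */
static int enable_tx_timestamps(int fd)
{
	int val = SOF_TIMESTAMPING_TX_ACK |	/* stamp when data is ACKed */
		  SOF_TIMESTAMPING_SOFTWARE;	/* report software stamps */

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
}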
@@ -523,7 +534,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	}
 	/* This barrier is coupled with smp_wmb() in tcp_reset() */
 	smp_rmb();
-	if (sk->sk_err)
+	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
 		mask |= POLLERR;
 
 	return mask;
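
After this change, POLLERR no longer implies a fatal socket error: it also fires when a timestamp message is waiting on the error queue. A hedged sketch of a poll loop that accounts for this; drain_errqueue() is a hypothetical helper, sketched after the tcp_recvmsg hunk below:

#include <errno.h>
#include <poll.h>

int drain_errqueue(int fd);	/* hypothetical helper, defined below */

/* Wait for readable data, treating POLLERR as "check the error queue
 * first" rather than as an immediate failure. */
static int wait_readable(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (poll(&pfd, 1, -1) < 0)
		return -1;
	if (pfd.revents & POLLERR) {
		if (drain_errqueue(fd) < 0 && errno != EAGAIN)
			return -1;	/* genuine socket error */
	}
	return pfd.revents & POLLIN;
}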
@@ -959,8 +970,10 @@ new_segment:
 
 		copied += copy;
 		offset += copy;
-		if (!(size -= copy))
+		if (!(size -= copy)) {
+			tcp_tx_timestamp(sk, skb);
 			goto out;
+		}
 
 		if (skb->len < size_goal || (flags & MSG_OOB))
 			continue;
@@ -1175,13 +1188,6 @@ new_segment:
 				goto wait_for_memory;
 
 				/*
-				 * All packets are restored as if they have
-				 * already been sent.
-				 */
-				if (tp->repair)
-					TCP_SKB_CB(skb)->when = tcp_time_stamp;
-
-				/*
 				 * Check whether we can use HW checksum.
 				 */
 				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
@@ -1190,6 +1196,13 @@ new_segment:
 				skb_entail(sk, skb);
 				copy = size_goal;
 				max = size_goal;
+
+				/* All packets are restored as if they have
+				 * already been sent. skb_mstamp isn't set to
+				 * avoid wrong rtt estimation.
+				 */
+				if (tp->repair)
+					TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
 			}
 
 			/* Try to append data to the end of skb. */
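
The two hunks above move the repair-mode special case: instead of faking a send timestamp (which skewed RTT estimation), restored skbs are now flagged TCPCB_REPAIRED. For context, a hedged userspace sketch of how a checkpoint/restore tool fills the send queue in repair mode, assuming the TCP_REPAIR* constants from linux/tcp.h (CAP_NET_ADMIN required; error paths and sequence-number setup abbreviated):

#include <linux/tcp.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* With TCP_REPAIR enabled and the send queue selected, data written with
 * send() is queued as if it had already been transmitted; this is the
 * tp->repair path the hunk above tags with TCPCB_REPAIRED. */
static int restore_send_queue(int fd, const void *buf, size_t len)
{
	int on = 1, off = 0;
	int q = TCP_SEND_QUEUE;

	if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on)))
		return -1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_QUEUE, &q, sizeof(q)))
		return -1;
	if (send(fd, buf, len, 0) < 0)	/* queued, not transmitted */
		return -1;
	return setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &off, sizeof(off));
}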
@@ -1252,8 +1265,10 @@ new_segment:
 
 			from += copy;
 			copied += copy;
-			if ((seglen -= copy) == 0 && iovlen == 0)
+			if ((seglen -= copy) == 0 && iovlen == 0) {
+				tcp_tx_timestamp(sk, skb);
 				goto out;
+			}
 
 			if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
 				continue;
@@ -1617,6 +1632,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	struct sk_buff *skb;
 	u32 urg_hole = 0;
 
+	if (unlikely(flags & MSG_ERRQUEUE))
+		return ip_recv_error(sk, msg, len, addr_len);
+
 	if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
 	    (sk->sk_state == TCP_ESTABLISHED))
 		sk_busy_loop(sk, nonblock);
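
Finally, the MSG_ERRQUEUE hook means tcp_recvmsg() can now hand queued timestamps to userspace via ip_recv_error(). A hedged userspace counterpart, assuming struct scm_timestamping and struct sock_extended_err from linux/errqueue.h: the SCM_TIMESTAMPING control message carries the stamps, and ee_data carries the tskey set by tcp_tx_timestamp():

#include <errno.h>
#include <linux/errqueue.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef SCM_TIMESTAMPING
#define SCM_TIMESTAMPING SO_TIMESTAMPING	/* asm/socket.h alias */
#endif

/* Drain one message from the socket error queue and print what arrived.
 * Sketch only: a real loop would repeat until recvmsg() returns EAGAIN. */
int drain_errqueue(int fd)
{
	char ctrl[512];
	struct msghdr msg = { .msg_control = ctrl,
			      .msg_controllen = sizeof(ctrl) };
	struct cmsghdr *cm;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) < 0)
		return -1;

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == SOL_SOCKET &&
		    cm->cmsg_type == SCM_TIMESTAMPING) {
			struct scm_timestamping *tss =
				(struct scm_timestamping *)CMSG_DATA(cm);

			printf("sw stamp %lld.%09ld\n",
			       (long long)tss->ts[0].tv_sec,
			       tss->ts[0].tv_nsec);
		} else if (cm->cmsg_level == IPPROTO_IP &&
			   cm->cmsg_type == IP_RECVERR) {
			struct sock_extended_err *serr =
				(struct sock_extended_err *)CMSG_DATA(cm);

			printf("tskey %u\n", serr->ee_data);
		}
	}
	return 0;
}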