Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--    net/ipv4/tcp.c    131
1 file changed, 77 insertions(+), 54 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3834b10b5115..2cf9a898ce50 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -297,7 +297,7 @@ EXPORT_SYMBOL(tcp_sockets_allocated);
  * All the sk_stream_mem_schedule() is of this nature: accounting
  * is strict, actions are advisory and have some latency.
  */
-int tcp_memory_pressure;
+int tcp_memory_pressure __read_mostly;
 
 EXPORT_SYMBOL(tcp_memory_pressure);
 
@@ -425,7 +425,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
                         /* Subtract 1, if FIN is in queue. */
                         if (answ && !skb_queue_empty(&sk->sk_receive_queue))
                                 answ -=
-                       ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
+                       tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin;
                 } else
                         answ = tp->urg_seq - tp->copied_seq;
                 release_sock(sk);
@@ -444,7 +444,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
                 break;
         default:
                 return -ENOIOCTLCMD;
-        };
+        }
 
         return put_user(answ, (int __user *)arg);
 }
@@ -460,9 +460,9 @@ static inline int forced_push(struct tcp_sock *tp)
         return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
 }
 
-static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
-                              struct sk_buff *skb)
+static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
 {
+        struct tcp_sock *tp = tcp_sk(sk);
         struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 
         skb->csum = 0;
@@ -470,10 +470,8 @@ static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
         tcb->flags = TCPCB_FLAG_ACK;
         tcb->sacked = 0;
         skb_header_release(skb);
-        __skb_queue_tail(&sk->sk_write_queue, skb);
+        tcp_add_write_queue_tail(sk, skb);
         sk_charge_skb(sk, skb);
-        if (!sk->sk_send_head)
-                sk->sk_send_head = skb;
         if (tp->nonagle & TCP_NAGLE_PUSH)
                 tp->nonagle &= ~TCP_NAGLE_PUSH;
 }
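skb_entail() no longer updates sk->sk_send_head by hand; the deleted two-line check moves into the queueing helper. A plausible sketch of tcp_add_write_queue_tail(), reconstructed from the lines removed above (an assumption, since the helper itself is not shown in this diff):

        static inline void tcp_add_write_queue_tail(struct sock *sk,
                                                    struct sk_buff *skb)
        {
                __skb_queue_tail(&sk->sk_write_queue, skb);

                /* If nothing was pending transmit, the new tail becomes
                 * the send head (the old open-coded check above). */
                if (sk->sk_send_head == NULL)
                        sk->sk_send_head = skb;
        }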
@@ -488,15 +486,17 @@ static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
         }
 }
 
-static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
-                            int mss_now, int nonagle)
+static inline void tcp_push(struct sock *sk, int flags, int mss_now,
+                            int nonagle)
 {
-        if (sk->sk_send_head) {
-                struct sk_buff *skb = sk->sk_write_queue.prev;
+        struct tcp_sock *tp = tcp_sk(sk);
+
+        if (tcp_send_head(sk)) {
+                struct sk_buff *skb = tcp_write_queue_tail(sk);
                 if (!(flags & MSG_MORE) || forced_push(tp))
                         tcp_mark_push(tp, skb);
                 tcp_mark_urg(tp, flags, skb);
-                __tcp_push_pending_frames(sk, tp, mss_now,
+                __tcp_push_pending_frames(sk, mss_now,
                           (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
         }
 }
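tcp_push() now derives tp locally and reads the queue through two accessors instead of poking sk fields directly. Minimal sketches, assuming they are the thin wrappers their call sites suggest:

        static inline struct sk_buff *tcp_send_head(struct sock *sk)
        {
                return sk->sk_send_head;
        }

        static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
        {
                /* With a non-empty queue, this is the same skb the old
                 * sk->sk_write_queue.prev access returned. */
                return skb_peek_tail(&sk->sk_write_queue);
        }

Hiding the queue representation behind one-liners like these is what allows the write queue's data structure to change later without touching every caller in tcp.c again.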
@@ -526,13 +526,13 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
                 goto do_error;
 
         while (psize > 0) {
-                struct sk_buff *skb = sk->sk_write_queue.prev;
+                struct sk_buff *skb = tcp_write_queue_tail(sk);
                 struct page *page = pages[poffset / PAGE_SIZE];
                 int copy, i, can_coalesce;
                 int offset = poffset % PAGE_SIZE;
                 int size = min_t(size_t, psize, PAGE_SIZE - offset);
 
-                if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) {
+                if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
 new_segment:
                         if (!sk_stream_memory_free(sk))
                                 goto wait_for_sndbuf;
@@ -542,7 +542,7 @@ new_segment:
                         if (!skb)
                                 goto wait_for_memory;
 
-                        skb_entail(sk, tp, skb);
+                        skb_entail(sk, skb);
                         copy = size_goal;
                 }
 
@@ -588,8 +588,8 @@ new_segment:
 
                 if (forced_push(tp)) {
                         tcp_mark_push(tp, skb);
-                        __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
-                } else if (skb == sk->sk_send_head)
+                        __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
+                } else if (skb == tcp_send_head(sk))
                         tcp_push_one(sk, mss_now);
                 continue;
 
@@ -597,7 +597,7 @@ wait_for_sndbuf:
                 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 wait_for_memory:
                 if (copied)
-                        tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
+                        tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
 
                 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
                         goto do_error;
@@ -608,7 +608,7 @@ wait_for_memory:
 
 out:
         if (copied)
-                tcp_push(sk, tp, flags, mss_now, tp->nonagle);
+                tcp_push(sk, flags, mss_now, tp->nonagle);
         return copied;
 
 do_error:
@@ -639,8 +639,9 @@ ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
 #define TCP_PAGE(sk)        (sk->sk_sndmsg_page)
 #define TCP_OFF(sk)        (sk->sk_sndmsg_off)
 
-static inline int select_size(struct sock *sk, struct tcp_sock *tp)
+static inline int select_size(struct sock *sk)
 {
+        struct tcp_sock *tp = tcp_sk(sk);
         int tmp = tp->mss_cache;
 
         if (sk->sk_route_caps & NETIF_F_SG) {
@@ -704,9 +705,9 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                 while (seglen > 0) {
                         int copy;
 
-                        skb = sk->sk_write_queue.prev;
+                        skb = tcp_write_queue_tail(sk);
 
-                        if (!sk->sk_send_head ||
+                        if (!tcp_send_head(sk) ||
                             (copy = size_goal - skb->len) <= 0) {
 
 new_segment:
@@ -716,7 +717,7 @@ new_segment:
                                 if (!sk_stream_memory_free(sk))
                                         goto wait_for_sndbuf;
 
-                                skb = sk_stream_alloc_pskb(sk, select_size(sk, tp),
+                                skb = sk_stream_alloc_pskb(sk, select_size(sk),
                                                            0, sk->sk_allocation);
                                 if (!skb)
                                         goto wait_for_memory;
@@ -727,7 +728,7 @@ new_segment:
                                 if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
                                         skb->ip_summed = CHECKSUM_PARTIAL;
 
-                                skb_entail(sk, tp, skb);
+                                skb_entail(sk, skb);
                                 copy = size_goal;
                         }
 
@@ -832,8 +833,8 @@ new_segment:
 
                         if (forced_push(tp)) {
                                 tcp_mark_push(tp, skb);
-                                __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
-                        } else if (skb == sk->sk_send_head)
+                                __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
+                        } else if (skb == tcp_send_head(sk))
                                 tcp_push_one(sk, mss_now);
                         continue;
 
@@ -841,7 +842,7 @@ wait_for_sndbuf:
                 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 wait_for_memory:
                 if (copied)
-                        tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
+                        tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
 
                 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
                         goto do_error;
@@ -853,16 +854,18 @@ wait_for_memory:
 
 out:
         if (copied)
-                tcp_push(sk, tp, flags, mss_now, tp->nonagle);
+                tcp_push(sk, flags, mss_now, tp->nonagle);
         TCP_CHECK_TIMER(sk);
         release_sock(sk);
         return copied;
 
 do_fault:
         if (!skb->len) {
-                if (sk->sk_send_head == skb)
-                        sk->sk_send_head = NULL;
-                __skb_unlink(skb, &sk->sk_write_queue);
+                tcp_unlink_write_queue(skb, sk);
+                /* It is the one place in all of TCP, except connection
+                 * reset, where we can be unlinking the send_head.
+                 */
+                tcp_check_send_head(sk, skb);
                 sk_stream_free_skb(sk, skb);
         }
 
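The do_fault path now unlinks through helpers instead of touching the list and send head directly. Sketches reconstructed from the three deleted lines above (an assumed shape; the helpers themselves are not part of this diff):

        static inline void tcp_unlink_write_queue(struct sk_buff *skb,
                                                  struct sock *sk)
        {
                __skb_unlink(skb, &sk->sk_write_queue);
        }

        /* Clear the send head if the skb just unlinked was it. */
        static inline void tcp_check_send_head(struct sock *sk,
                                               struct sk_buff *skb_unlinked)
        {
                if (sk->sk_send_head == skb_unlinked)
                        sk->sk_send_head = NULL;
        }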
@@ -1016,9 +1019,9 @@ static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
 
         skb_queue_walk(&sk->sk_receive_queue, skb) {
                 offset = seq - TCP_SKB_CB(skb)->seq;
-                if (skb->h.th->syn)
+                if (tcp_hdr(skb)->syn)
                         offset--;
-                if (offset < skb->len || skb->h.th->fin) {
+                if (offset < skb->len || tcp_hdr(skb)->fin) {
                         *off = offset;
                         return skb;
                 }
@@ -1070,7 +1073,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
                         if (offset != skb->len)
                                 break;
                 }
-                if (skb->h.th->fin) {
+                if (tcp_hdr(skb)->fin) {
                         sk_eat_skb(sk, skb, 0);
                         ++seq;
                         break;
@@ -1174,11 +1177,11 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                                 break;
                         }
                         offset = *seq - TCP_SKB_CB(skb)->seq;
-                        if (skb->h.th->syn)
+                        if (tcp_hdr(skb)->syn)
                                 offset--;
                         if (offset < skb->len)
                                 goto found_ok_skb;
-                        if (skb->h.th->fin)
+                        if (tcp_hdr(skb)->fin)
                                 goto found_fin_ok;
                         BUG_TRAP(flags & MSG_PEEK);
                         skb = skb->next;
@@ -1389,12 +1392,12 @@ do_prequeue:
 skip_copy:
                 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
                         tp->urg_data = 0;
-                        tcp_fast_path_check(sk, tp);
+                        tcp_fast_path_check(sk);
                 }
                 if (used + offset < skb->len)
                         continue;
 
-                if (skb->h.th->fin)
+                if (tcp_hdr(skb)->fin)
                         goto found_fin_ok;
                 if (!(flags & MSG_PEEK)) {
                         sk_eat_skb(sk, skb, copied_early);
@@ -1563,7 +1566,7 @@ void tcp_close(struct sock *sk, long timeout)
          */
         while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
-                          skb->h.th->fin;
+                          tcp_hdr(skb)->fin;
                 data_was_unread += len;
                 __kfree_skb(skb);
         }
@@ -1732,7 +1735,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 
         tcp_clear_xmit_timers(sk);
         __skb_queue_purge(&sk->sk_receive_queue);
-        sk_stream_writequeue_purge(sk);
+        tcp_write_queue_purge(sk);
         __skb_queue_purge(&tp->out_of_order_queue);
 #ifdef CONFIG_NET_DMA
         __skb_queue_purge(&sk->sk_async_wait_queue);
@@ -1758,7 +1761,7 @@ int tcp_disconnect(struct sock *sk, int flags)
         tcp_set_ca_state(sk, TCP_CA_Open);
         tcp_clear_retrans(tp);
         inet_csk_delack_init(sk);
-        sk->sk_send_head = NULL;
+        tcp_init_send_head(sk);
         tp->rx_opt.saw_tstamp = 0;
         tcp_sack_reset(&tp->rx_opt);
         __sk_dst_reset(sk);
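tcp_disconnect() swaps the generic stream purge and the direct send-head reset for TCP-specific helpers. Plausible sketches, assuming they mirror the code they replace (the purge body is an assumption modeled on the old sk_stream_writequeue_purge()):

        static inline void tcp_write_queue_purge(struct sock *sk)
        {
                struct sk_buff *skb;

                /* Free every queued segment, then return the memory. */
                while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
                        sk_stream_free_skb(sk, skb);
                sk_stream_mem_reclaim(sk);
        }

        static inline void tcp_init_send_head(struct sock *sk)
        {
                sk->sk_send_head = NULL;
        }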
@@ -1830,7 +1833,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                          * for currently queued segments.
                          */
                         tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
-                        tcp_push_pending_frames(sk, tp);
+                        tcp_push_pending_frames(sk);
                 } else {
                         tp->nonagle &= ~TCP_NAGLE_OFF;
                 }
@@ -1854,7 +1857,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                         tp->nonagle &= ~TCP_NAGLE_CORK;
                         if (tp->nonagle&TCP_NAGLE_OFF)
                                 tp->nonagle |= TCP_NAGLE_PUSH;
-                        tcp_push_pending_frames(sk, tp);
+                        tcp_push_pending_frames(sk);
                 }
                 break;
 
@@ -1954,7 +1957,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
         default:
                 err = -ENOPROTOOPT;
                 break;
-        };
+        }
+
         release_sock(sk);
         return err;
 }
@@ -2124,7 +2128,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
                 return 0;
         default:
                 return -ENOPROTOOPT;
-        };
+        }
 
         if (put_user(len, optlen))
                 return -EFAULT;
@@ -2170,7 +2174,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
         if (!pskb_may_pull(skb, sizeof(*th)))
                 goto out;
 
-        th = skb->h.th;
+        th = tcp_hdr(skb);
         thlen = th->doff * 4;
         if (thlen < sizeof(*th))
                 goto out;
@@ -2210,7 +2214,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
         delta = htonl(oldlen + (thlen + len));
 
         skb = segs;
-        th = skb->h.th;
+        th = tcp_hdr(skb);
         seq = ntohl(th->seq);
 
         do {
@@ -2219,23 +2223,25 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
                 th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
                                        (__force u32)delta));
                 if (skb->ip_summed != CHECKSUM_PARTIAL)
-                        th->check = csum_fold(csum_partial(skb->h.raw, thlen,
-                                                           skb->csum));
+                        th->check =
+                             csum_fold(csum_partial(skb_transport_header(skb),
+                                                    thlen, skb->csum));
 
                 seq += len;
                 skb = skb->next;
-                th = skb->h.th;
+                th = tcp_hdr(skb);
 
                 th->seq = htonl(seq);
                 th->cwr = 0;
         } while (skb->next);
 
-        delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
+        delta = htonl(oldlen + (skb->tail - skb->transport_header) +
+                      skb->data_len);
         th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
                                 (__force u32)delta));
         if (skb->ip_summed != CHECKSUM_PARTIAL)
-                th->check = csum_fold(csum_partial(skb->h.raw, thlen,
-                                                   skb->csum));
+                th->check = csum_fold(csum_partial(skb_transport_header(skb),
+                                                   thlen, skb->csum));
 
 out:
         return segs;
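For context on the arithmetic above: each segment's pseudo-header length differs from the original super-packet's, so rather than recomputing the checksum over the payload, the code folds a precomputed delta into th->check using ones'-complement addition. A hypothetical helper (not from the patch) showing the fold that csum_fold() performs internally:

        /* Fold a 32-bit ones'-complement accumulator to 16 bits; two
         * rounds are enough to absorb any carry out of the low half.
         * csum_fold() additionally complements the result. */
        static inline u16 fold16(u32 sum)
        {
                sum = (sum & 0xffff) + (sum >> 16);
                sum = (sum & 0xffff) + (sum >> 16);
                return (u16)sum;
        }

The CHECKSUM_PARTIAL branch skips the csum_partial() pass entirely because the hardware will finish the checksum on transmit.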
@@ -2372,6 +2378,23 @@ void __tcp_put_md5sig_pool(void)
 EXPORT_SYMBOL(__tcp_put_md5sig_pool);
 #endif
 
+void tcp_done(struct sock *sk)
+{
+        if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
+                TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
+
+        tcp_set_state(sk, TCP_CLOSE);
+        tcp_clear_xmit_timers(sk);
+
+        sk->sk_shutdown = SHUTDOWN_MASK;
+
+        if (!sock_flag(sk, SOCK_DEAD))
+                sk->sk_state_change(sk);
+        else
+                inet_csk_destroy_sock(sk);
+}
+EXPORT_SYMBOL_GPL(tcp_done);
+
 extern void __skb_cb_too_small_for_tcp(int, int);
 extern struct tcp_congestion_ops tcp_reno;
 
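The newly added tcp_done() consolidates the common end-of-connection steps in one exported function: count a failed attempt if the handshake never completed, move the socket to TCP_CLOSE, stop the retransmit timers, mark both directions shut down, and then either wake a still-attached process via sk_state_change() or destroy an already-orphaned socket. The GPL-only export suggests it is intended for in-tree protocol code rather than arbitrary out-of-tree modules.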