Diffstat (limited to 'net/ipv4/tcp_input.c')
 -rw-r--r--  net/ipv4/tcp_input.c | 116
 1 file changed, 58 insertions(+), 58 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c6109895bb5e..1a14191687ac 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -50,9 +50,9 @@
 *	Andi Kleen	:	Make sure we never ack data there is not
 *				enough room for. Also make this condition
 *				a fatal error if it might still happen.
 *	Andi Kleen	:	Add tcp_measure_rcv_mss to make
 *				connections with MSS<min(MTU,ann. MSS)
 *				work without delayed acks.
 *	Andi Kleen	:	Process packets with PSH set in the
 *				fast path.
 *	J Hadi Salim	:	ECN support
@@ -112,17 +112,17 @@ int sysctl_tcp_abc __read_mostly;

#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)

/* Adapt the MSS value used to make delayed ack decision to the
 * real world.
 */
static void tcp_measure_rcv_mss(struct sock *sk,
				const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const unsigned int lss = icsk->icsk_ack.last_seg_size;
	unsigned int len;

	icsk->icsk_ack.last_seg_size = 0;

	/* skb->len may jitter because of SACKs, even if peer
	 * sends good full-sized frames.
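A note on the heuristic this function implements: the receiver cannot trust the announced MSS (the path MTU may be lower), so it infers the sender's effective segment size from the segments that actually arrive, and that estimate drives delayed-ACK decisions. A minimal sketch of the decision rule, with illustrative names (rcv_mss_estimate is not a kernel function):

	/* Sketch: grow the estimate on any full-sized frame; accept a
	 * smaller value only when consecutive segments agree, so a single
	 * SACK-shrunken runt does not collapse the estimate.
	 */
	static unsigned int rcv_mss_estimate(unsigned int cur_est,
					     unsigned int seg_len,
					     unsigned int last_seg_size)
	{
		if (seg_len >= cur_est)
			return seg_len;		/* full-sized: trust it */
		if (last_seg_size && seg_len >= last_seg_size)
			return seg_len;		/* two small frames agree */
		return cur_est;			/* ignore the jitter */
	}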
@@ -440,15 +440,15 @@ void tcp_rcv_space_adjust(struct sock *sk)
	struct tcp_sock *tp = tcp_sk(sk);
	int time;
	int space;

	if (tp->rcvq_space.time == 0)
		goto new_measure;

	time = tcp_time_stamp - tp->rcvq_space.time;
	if (time < (tp->rcv_rtt_est.rtt >> 3) ||
	    tp->rcv_rtt_est.rtt == 0)
		return;

	space = 2 * (tp->copied_seq - tp->rcvq_space.seq);

	space = max(tp->rcvq_space.space, space);
@@ -483,7 +483,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
			}
		}
	}

new_measure:
	tp->rcvq_space.seq = tp->copied_seq;
	tp->rcvq_space.time = tcp_time_stamp;
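tcp_rcv_space_adjust() is the receive-side autotuning ("dynamic right-sizing") measurement: once per receiver-estimated RTT it compares what the application actually copied out against the current window estimate. A condensed sketch of the computation, illustrative rather than verbatim:

	/* Once per RTT: copied = bytes the application consumed since the
	 * last measurement point (tp->copied_seq - tp->rcvq_space.seq).
	 */
	int space = 2 * copied;			/* headroom for growth */

	if (space > tp->rcvq_space.space) {
		tp->rcvq_space.space = space;	/* never shrink estimate */
		/* ...and, when receive-buffer moderation is enabled,
		 * raise sk_rcvbuf toward this target, capped by
		 * sysctl_tcp_rmem[2].
		 */
	}

The factor of two gives the window room to keep growing while the next RTT's worth of data is still in flight.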
@@ -509,7 +509,7 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_
	tcp_measure_rcv_mss(sk, skb);

	tcp_rcv_rtt_measure(tp);

	now = tcp_time_stamp;

	if (!icsk->icsk_ack.ato) {
@@ -561,7 +561,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
	/* The following amusing code comes from Jacobson's
	 * article in SIGCOMM '88. Note that rtt and mdev
	 * are scaled versions of rtt and mean deviation.
	 * This is designed to be as fast as possible
	 * m stands for "measurement".
	 *
	 * On a 1990 paper the rto value is changed to:
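For reference, the scaled arithmetic the comment alludes to: srtt is stored as 8*srtt and mdev as 4*mdev, so both exponentially weighted averages reduce to shifts and adds, and the 1990 refinement sets rto = srtt + 4*mdev. A self-contained sketch in user-space style, not the kernel's exact code:

	struct rtt_state { long srtt; long mdev; };	/* srtt<<3, mdev<<2 */

	static long rtt_update(struct rtt_state *s, long m)
	{
		m -= (s->srtt >> 3);	/* m becomes the error term       */
		s->srtt += m;		/* srtt = 7/8 srtt + 1/8 m        */
		if (m < 0)
			m = -m;
		m -= (s->mdev >> 2);
		s->mdev += m;		/* mdev = 3/4 mdev + 1/4 |err|    */
		return (s->srtt >> 3) + s->mdev;  /* rto = srtt + 4*mdev  */
	}

Because both state variables are pre-scaled, the whole update costs a handful of integer operations per ACK.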
@@ -1249,8 +1249,8 @@ void tcp_enter_frto(struct sock *sk)
	tp->frto_counter = 1;

	if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
	    tp->snd_una == tp->high_seq ||
	    (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
		tp->prior_ssthresh = tcp_current_ssthresh(sk);
		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
		tcp_ca_event(sk, CA_EVENT_FRTO);
@@ -1969,11 +1969,11 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
	 * 1. Reno does not count dupacks (sacked_out) automatically. */
	if (!tp->packets_out)
		tp->sacked_out = 0;
	/* 2. SACK counts snd_fack in packets inaccurately. */
	if (tp->sacked_out == 0)
		tp->fackets_out = 0;

	/* Now state machine starts.
	 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
	if (flag&FLAG_ECE)
		tp->prior_ssthresh = 0;
@@ -2203,7 +2203,7 @@ static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb,
			 __u32 now, __s32 *seq_rtt)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
	__u32 seq = tp->snd_una;
	__u32 packets_acked;
	int acked = 0;
@@ -2279,7 +2279,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)

	while ((skb = skb_peek(&sk->sk_write_queue)) &&
	       skb != sk->sk_send_head) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		__u8 sacked = scb->sacked;

		/* If our packet is before the ack sequence we can
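The comment's point: segments that end at or before snd_una are confirmed delivered and can be freed. The walk stops at the first segment not fully covered by the ACK, or at sk_send_head, since nothing beyond it has been transmitted yet. Schematically, assuming this era's write-queue layout:

	while ((skb = skb_peek(&sk->sk_write_queue)) &&
	       skb != sk->sk_send_head) {
		if (after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
			break;		/* only partially acked: stop   */
		__skb_unlink(skb, &sk->sk_write_queue);
		sk_stream_free_skb(sk, skb);	/* fully acked: release */
	}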
@@ -2470,9 +2470,9 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
static void tcp_process_frto(struct sock *sk, u32 prior_snd_una)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_sync_left_out(tp);

	if (tp->snd_una == prior_snd_una ||
	    !before(tp->snd_una, tp->frto_highmark)) {
		/* RTO was caused by loss, start retransmitting in
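That branch handles the genuine-loss case; the value of F-RTO (RFC 4138-style forward RTO recovery) is the other branch, which withholds the usual go-to-slow-start response until the ACKs arriving after the timeout reveal whether the RTO was real. Schematic decision logic; the two outcome helpers are hypothetical names, not kernel functions:

	if (tp->snd_una == prior_snd_una ||
	    !before(tp->snd_una, tp->frto_highmark)) {
		/* Nothing new was ACKed, or the ACK already covers the
		 * data sent after the timeout: treat the RTO as real loss.
		 */
		enter_conventional_loss_recovery(sk);	/* hypothetical */
	} else {
		/* snd_una advanced using original (pre-timeout) data: the
		 * retransmission was likely spurious, so send new data
		 * instead of retransmitting old segments.
		 */
		continue_frto_probing(sk);		/* hypothetical */
	}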
@@ -2627,7 +2627,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
	opt_rx->saw_tstamp = 0;

	while(length>0) {
		int opcode=*ptr++;
		int opsize;

		switch (opcode) {
@@ -2642,7 +2642,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
				return;
			if (opsize > length)
				return;	/* don't parse partial options */
			switch(opcode) {
			case TCPOPT_MSS:
				if(opsize==TCPOLEN_MSS && th->syn && !estab) {
					u16 in_mss = ntohs(get_unaligned((__be16 *)ptr));
@@ -2701,10 +2701,10 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
				 */
				break;
#endif
			};
			ptr+=opsize-2;
			length-=opsize;
		};
	}
}
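The loop shown across these hunks is the generic TCP option walk: one byte of opcode, and for everything except EOL and NOP a length byte that covers the opcode and length bytes themselves, hence the opsize - 2 advance. A standalone, simplified sketch of the same shape (user-space style, not the kernel function):

	static void parse_tcp_options(const unsigned char *ptr, int length)
	{
		while (length > 0) {
			int opcode = *ptr++;
			int opsize;

			switch (opcode) {
			case TCPOPT_EOL:	/* 0: end of option list */
				return;
			case TCPOPT_NOP:	/* 1: one byte of padding */
				length--;
				continue;
			default:
				opsize = *ptr++;
				if (opsize < 2)		/* malformed */
					return;
				if (opsize > length)	/* truncated */
					return;
				/* ...handle MSS, WSCALE, SACK, TIMESTAMP... */
				ptr += opsize - 2;
				length -= opsize;
			}
		}
	}

The two early returns are the same defensive checks as in the hunk above: a "silly" length below 2 would loop forever, and an option running past the header end must not be parsed partially.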
@@ -3263,7 +3263,7 @@ drop:
			  TCP_SKB_CB(skb)->end_seq);

		tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);

		/* If window is closed, drop tail of packet. But after
		 * remembering D-SACK for its head made in previous line.
		 */
@@ -3342,7 +3342,7 @@ drop:
		}
	}
	__skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue);

	/* And clean segments covered by new one as whole. */
	while ((skb1 = skb->next) !=
	       (struct sk_buff*)&tp->out_of_order_queue &&
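After the new segment is linked into the out-of-order queue, any queued segment it wholly covers is redundant and gets dropped; a partially covered one is kept. The real code also records D-SACK information for each overlap, omitted in this simplified sketch:

	while ((skb1 = skb->next) !=
	       (struct sk_buff *)&tp->out_of_order_queue &&
	       after(TCP_SKB_CB(skb)->end_seq, TCP_SKB_CB(skb1)->seq)) {
		if (before(TCP_SKB_CB(skb)->end_seq,
			   TCP_SKB_CB(skb1)->end_seq))
			break;		/* partial overlap: keep skb1 */
		__skb_unlink(skb1, &tp->out_of_order_queue);
		__kfree_skb(skb1);	/* fully covered: drop it */
	}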
@@ -3507,7 +3507,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
 */
static int tcp_prune_queue(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
@@ -3617,7 +3617,7 @@ static void tcp_new_space(struct sock *sk)
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_should_expand_sndbuf(sk, tp)) {
		int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
			MAX_TCP_HEADER + 16 + sizeof(struct sk_buff),
		    demanded = max_t(unsigned int, tp->snd_cwnd,
				     tp->reordering + 1);
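Rough arithmetic behind this sizing, with illustrative numbers (structure sizes and the exact multiplier vary by architecture and kernel version):

	/* Illustrative only: mss_cache = 1460, MAX_TCP_HEADER + 16 ~ 176,
	 * sizeof(struct sk_buff) ~ 240.
	 */
	int sndmem   = 1460 + 176 + 240;	/* ~1876 bytes/segment    */
	int demanded = max(snd_cwnd, reordering + 1);	/* e.g. cwnd 10   */

	/* The function then asks for roughly two windows' worth of such
	 * segments (sndmem * 2 * demanded, ~37 KB here), capped by
	 * sysctl_tcp_wmem[2].
	 */

sndmem counts true memory cost per queued segment, header and sk_buff overhead included, so the send buffer tracks what a full congestion window actually occupies rather than raw payload bytes.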
@@ -3690,7 +3690,7 @@ static inline void tcp_ack_snd_check(struct sock *sk)
 * For 1003.1g we should support a new option TCP_STDURG to permit
 * either form (or just set the sysctl tcp_stdurg).
 */

static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
{
	struct tcp_sock *tp = tcp_sk(sk);
@@ -3771,7 +3771,7 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)
		u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) -
			  th->syn;

		/* Is the urgent pointer pointing into this packet? */
		if (ptr < skb->len) {
			u8 tmp;
			if (skb_copy_bits(skb, ptr, &tmp, 1))
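The ptr arithmetic converts the urgent byte's sequence number into an offset from the start of the TCP header (the skb has not been trimmed yet at this point): the payload-relative offset urg_seq - seq, plus doff*4 bytes of header, minus one if a SYN occupies the first sequence number. A worked example with made-up numbers:

	/* Suppose seq = 1000, urg_seq = 1010, doff = 5 (20-byte header),
	 * no SYN:
	 *
	 *	ptr = 1010 - 1000 + 20 - 0 = 30
	 *
	 * With a 40-byte payload, skb->len = 60, so ptr < skb->len holds
	 * and skb_copy_bits(skb, 30, &tmp, 1) fetches the urgent byte.
	 */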
@@ -3835,7 +3835,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen
	int copied_early = 0;

	if (tp->ucopy.wakeup)
		return 0;

	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
		tp->ucopy.dma_chan = get_softnet_dma();
@@ -3871,26 +3871,26 @@ out:
#endif /* CONFIG_NET_DMA */

/*
 *	TCP receive function for the ESTABLISHED state.
 *
 *	It is split into a fast path and a slow path. The fast path is
 *	disabled when:
 *	- A zero window was announced from us - zero window probing
 *	  is only handled properly in the slow path.
 *	- Out of order segments arrived.
 *	- Urgent data is expected.
 *	- There is no buffer space left
 *	- Unexpected TCP flags/window values/header lengths are received
 *	  (detected by checking the TCP header against pred_flags)
 *	- Data is sent in both directions. Fast path only supports pure senders
 *	  or pure receivers (this means either the sequence number or the ack
 *	  value must stay constant)
 *	- Unexpected TCP option.
 *
 *	When these conditions are not satisfied it drops into a standard
 *	receive procedure patterned after RFC793 to handle all cases.
 *	The first three cases are guaranteed by proper pred_flags setting,
 *	the rest is checked inline. Fast processing is turned on in
 *	tcp_data_queue when everything is OK.
 */
int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
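At run time, most of the conditions in that list collapse into a single compare: pred_flags caches the expected fourth 32-bit word of the TCP header (data offset, flag bits, window), so the fast-path gate is roughly:

	if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags &&
	    TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
		/* prediction hit: pure ACK or expected in-order data */
	} else {
		/* any surprise (flags, window change, sequence gap,
		 * urgent data, unexpected options) -> slow path
		 */
	}

TCP_HP_BITS masks out PSH and the reserved bits, which is why the comment block above can say the PSH flag is ignored by prediction.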
@@ -3900,15 +3900,15 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,

	/*
	 *	Header prediction.
	 *	The code loosely follows the one in the famous
	 *	"30 instruction TCP receive" Van Jacobson mail.
	 *
	 *	Van's trick is to deposit buffers into socket queue
	 *	on a device interrupt, to call tcp_recv function
	 *	on the receive process context and checksum and copy
	 *	the buffer to user space. smart...
	 *
	 *	Our current scheme is not silly either but we take the
	 *	extra cost of the net_bh soft interrupt processing...
	 *	We do checksum and copy also but from device to kernel.
	 */
@@ -3919,7 +3919,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
	 *	if header_prediction is to be made
	 *	'S' will always be tp->tcp_header_len >> 2
	 *	'?' will be 0 for the fast path, otherwise pred_flags is 0 to
	 *	turn it off	(when there are holes in the receive
	 *	space for instance)
	 *	PSH flag is ignored.
	 */
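For completeness, the shape of the pred_flags value this comment describes: header length in the top four bits of the word, ACK as the only expected flag, and the expected (already descaled) window in the low 16 bits. A sketch following __tcp_fast_path_on() in include/net/tcp.h:

	/* tcp_header_len is in bytes, so >> 2 gives the doff field, which
	 * lives in the top nibble of the header word -- hence << 26 overall.
	 */
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);

Setting pred_flags to 0 can never match a real header word, which is how the slow path is forced when holes exist in the receive space.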
@@ -3943,7 +3943,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
				goto slow_path;

			tp->rx_opt.saw_tstamp = 1;
			++ptr;
			tp->rx_opt.rcv_tsval = ntohl(*ptr);
			++ptr;
			tp->rx_opt.rcv_tsecr = ntohl(*ptr);
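Immediately before these loads, the fast path verifies that the options area holds exactly the canonical timestamp layout, two NOPs padding the 10-byte timestamp option, with a single word compare, roughly:

	__be32 tstamp_word = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);

	if (*ptr == tstamp_word) {
		/* aligned timestamps: parse tsval/tsecr inline as shown
		 * above and stay on the fast path
		 */
	}

Any other option layout takes the goto slow_path seen in the hunk, where the general parser handles it.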
@@ -3975,7 +3975,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
				 * on entry.
				 */
				tcp_ack(sk, skb, 0);
				__kfree_skb(skb);
				tcp_data_snd_check(sk, tp);
				return 0;
			} else { /* Header too small */
@@ -4393,11 +4393,11 @@ reset_and_undo:

/*
 *	This function implements the receiving procedure of RFC 793 for
 *	all states except ESTABLISHED and TIME_WAIT.
 *	It's called from both tcp_v4_rcv and tcp_v6_rcv and should be
 *	address independent.
 */

int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
			  struct tcphdr *th, unsigned len)
{
@@ -4422,19 +4422,19 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
		if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
			return 1;

		/* Now we have several options: In theory there is
		 * nothing else in the frame. KA9Q has an option to
		 * send data with the syn, BSD accepts data with the
		 * syn up to the [to be] advertised window and
		 * Solaris 2.1 gives you a protocol error. For now
		 * we just ignore it, that fits the spec precisely
		 * and avoids incompatibilities. It would be nice in
		 * future to drop through and process the data.
		 *
		 * Now that TTCP is starting to be used we ought to
		 * queue this data.
		 * But, this leaves one open to an easy denial of
		 * service attack, and SYN cookies can't defend
		 * against this problem. So, we drop the data
		 * in the interest of security over speed unless
		 * it's still in use.
@@ -4624,7 +4624,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
		/* RFC 793 says to queue data in these states,
		 * RFC 1122 says we MUST send a reset.
		 * BSD 4.4 also does reset.
		 */
		if (sk->sk_shutdown & RCV_SHUTDOWN) {
@@ -4636,7 +4636,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
			}
		}
		/* Fall through */
	case TCP_ESTABLISHED:
		tcp_data_queue(sk, skb);
		queued = 1;
		break;
@@ -4648,7 +4648,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
		tcp_ack_snd_check(sk);
	}

	if (!queued) {
discard:
		__kfree_skb(skb);
	}
