path: root/net/ipv4
author		Stephen Hemminger <shemminger@linux-foundation.org>	2007-03-08 23:45:19 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2007-04-26 01:24:13 -0400
commit		2de979bd7da9c8b39cc0aabb0ab5aa1516d929eb (patch)
tree		fa3ad76a09660b3b8dac1c53a64f202088c0764c /net/ipv4
parent		132adf54639cf7dd9315e8df89c2faa59f6e46d9 (diff)
[TCP]: whitespace cleanup
Add whitespace around keywords.

Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
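For reference, the rule this cleanup applies is the one described in the kernel's Documentation/CodingStyle: control-flow keywords (if, while, for, switch, do ... while) take a single space before the opening parenthesis, while function calls do not. A minimal sketch of the before/after shape follows; the helper and variable names are hypothetical and are not taken from this patch:

	/* Hypothetical helper, written only to illustrate the spacing rule. */
	static int count_nonzero(const int *buf, int len)
	{
		int n = 0;

		while (len > 0) {		/* was: while(len > 0) {  */
			if (buf[len - 1])	/* was: if(buf[len - 1])  */
				n++;
			len--;
		}

		switch (n) {			/* was: switch(n) {       */
		case 0:
			return -1;		/* no nonzero entries found */
		default:
			return n;		/* calls stay tight: foo(x), not foo (x) */
		}
	}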
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/tcp_hybla.c		 2
-rw-r--r--	net/ipv4/tcp_input.c		57
-rw-r--r--	net/ipv4/tcp_minisocks.c	 6
-rw-r--r--	net/ipv4/tcp_output.c		34
-rw-r--r--	net/ipv4/tcp_westwood.c		 2
5 files changed, 51 insertions, 50 deletions
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index 59e691d26f6..e5be3511722 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -144,7 +144,7 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
 	ca->snd_cwnd_cents += odd;
 
 	/* check when fractions goes >=128 and increase cwnd by 1. */
-	while(ca->snd_cwnd_cents >= 128) {
+	while (ca->snd_cwnd_cents >= 128) {
 		tp->snd_cwnd++;
 		ca->snd_cwnd_cents -= 128;
 		tp->snd_cwnd_cnt = 0;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 22d0bb03c5d..fb025608594 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -578,7 +578,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
 	 * does not matter how to _calculate_ it. Seems, it was trap
 	 * that VJ failed to avoid. 8)
 	 */
-	if(m == 0)
+	if (m == 0)
 		m = 1;
 	if (tp->srtt != 0) {
 		m -= (tp->srtt >> 3);	/* m is now error in rtt est */
@@ -1758,12 +1758,11 @@ static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp,
 
 		/* clear xmit_retransmit_queue hints
 		 *  if this is beyond hint */
-		if(tp->retransmit_skb_hint != NULL &&
+		if (tp->retransmit_skb_hint != NULL &&
 		   before(TCP_SKB_CB(skb)->seq,
-		   TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) {
-
+		   TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
 			tp->retransmit_skb_hint = NULL;
-		}
+
 		}
 	}
 	tcp_sync_left_out(tp);
@@ -2441,7 +2440,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
 
 		if (sacked) {
 			if (sacked & TCPCB_RETRANS) {
-				if(sacked & TCPCB_SACKED_RETRANS)
+				if (sacked & TCPCB_SACKED_RETRANS)
 					tp->retrans_out -= tcp_skb_pcount(skb);
 				acked |= FLAG_RETRANS_DATA_ACKED;
 				seq_rtt = -1;
@@ -2840,7 +2839,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
 	ptr = (unsigned char *)(th + 1);
 	opt_rx->saw_tstamp = 0;
 
-	while(length>0) {
+	while (length > 0) {
 		int opcode=*ptr++;
 		int opsize;
 
@@ -2856,9 +2855,9 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
 				return;
 			if (opsize > length)
 				return;	/* don't parse partial options */
-			switch(opcode) {
+			switch (opcode) {
 			case TCPOPT_MSS:
-				if(opsize==TCPOLEN_MSS && th->syn && !estab) {
+				if (opsize==TCPOLEN_MSS && th->syn && !estab) {
 					u16 in_mss = ntohs(get_unaligned((__be16 *)ptr));
 					if (in_mss) {
 						if (opt_rx->user_mss && opt_rx->user_mss < in_mss)
@@ -2868,12 +2867,12 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
 				}
 				break;
 			case TCPOPT_WINDOW:
-				if(opsize==TCPOLEN_WINDOW && th->syn && !estab)
+				if (opsize==TCPOLEN_WINDOW && th->syn && !estab)
 					if (sysctl_tcp_window_scaling) {
 						__u8 snd_wscale = *(__u8 *) ptr;
 						opt_rx->wscale_ok = 1;
 						if (snd_wscale > 14) {
-							if(net_ratelimit())
+							if (net_ratelimit())
 								printk(KERN_INFO "tcp_parse_options: Illegal window "
 								       "scaling value %d >14 received.\n",
 								       snd_wscale);
@@ -2883,7 +2882,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
 				}
 				break;
 			case TCPOPT_TIMESTAMP:
-				if(opsize==TCPOLEN_TIMESTAMP) {
+				if (opsize==TCPOLEN_TIMESTAMP) {
 					if ((estab && opt_rx->tstamp_ok) ||
 					    (!estab && sysctl_tcp_timestamps)) {
 						opt_rx->saw_tstamp = 1;
@@ -2893,7 +2892,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
 				}
 				break;
 			case TCPOPT_SACK_PERM:
-				if(opsize==TCPOLEN_SACK_PERM && th->syn && !estab) {
+				if (opsize==TCPOLEN_SACK_PERM && th->syn && !estab) {
 					if (sysctl_tcp_sack) {
 						opt_rx->sack_ok = 1;
 						tcp_sack_reset(opt_rx);
@@ -2902,7 +2901,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
 				break;
 
 			case TCPOPT_SACK:
-				if((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
+				if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
 				   !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) &&
 				   opt_rx->sack_ok) {
 					TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
@@ -2964,7 +2963,7 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
 	 * Not only, also it occurs for expired timestamps.
 	 */
 
-	if((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) >= 0 ||
+	if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) >= 0 ||
 	   get_seconds() >= tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS)
 		tcp_store_ts_recent(tp);
 }
@@ -3223,7 +3222,7 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
 			 */
 			tp->rx_opt.num_sacks--;
 			tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
-			for(i=this_sack; i < tp->rx_opt.num_sacks; i++)
+			for (i=this_sack; i < tp->rx_opt.num_sacks; i++)
 				sp[i] = sp[i+1];
 			continue;
 		}
@@ -3276,7 +3275,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
 		tp->rx_opt.num_sacks--;
 		sp--;
 	}
-	for(; this_sack > 0; this_sack--, sp--)
+	for (; this_sack > 0; this_sack--, sp--)
 		*sp = *(sp-1);
 
 new_sack:
@@ -3302,7 +3301,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
 		return;
 	}
 
-	for(this_sack = 0; this_sack < num_sacks; ) {
+	for (this_sack = 0; this_sack < num_sacks; ) {
 		/* Check if the start of the sack is covered by RCV.NXT. */
 		if (!before(tp->rcv_nxt, sp->start_seq)) {
 			int i;
@@ -3358,7 +3357,7 @@ static void tcp_ofo_queue(struct sock *sk)
 		__skb_unlink(skb, &tp->out_of_order_queue);
 		__skb_queue_tail(&sk->sk_receive_queue, skb);
 		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-		if(skb->h.th->fin)
+		if (skb->h.th->fin)
 			tcp_fin(skb, sk, skb->h.th);
 	}
 }
@@ -3424,9 +3423,9 @@ queue_and_out:
 			__skb_queue_tail(&sk->sk_receive_queue, skb);
 		}
 		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-		if(skb->len)
+		if (skb->len)
 			tcp_event_data_recv(sk, tp, skb);
-		if(th->fin)
+		if (th->fin)
 			tcp_fin(skb, sk, th);
 
 		if (!skb_queue_empty(&tp->out_of_order_queue)) {
@@ -4323,7 +4322,7 @@ slow_path:
 		goto discard;
 	}
 
-	if(th->rst) {
+	if (th->rst) {
 		tcp_reset(sk);
 		goto discard;
 	}
@@ -4338,7 +4337,7 @@ slow_path:
 	}
 
 step5:
-	if(th->ack)
+	if (th->ack)
 		tcp_ack(sk, skb, FLAG_SLOWPATH);
 
 	tcp_rcv_rtt_measure_ts(sk, skb);
@@ -4626,13 +4625,13 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		goto discard;
 
 	case TCP_LISTEN:
-		if(th->ack)
+		if (th->ack)
 			return 1;
 
-		if(th->rst)
+		if (th->rst)
 			goto discard;
 
-		if(th->syn) {
+		if (th->syn) {
 			if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
 				return 1;
 
@@ -4688,7 +4687,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	}
 
 	/* step 2: check RST bit */
-	if(th->rst) {
+	if (th->rst) {
 		tcp_reset(sk);
 		goto discard;
 	}
@@ -4711,7 +4710,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	if (th->ack) {
 		int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH);
 
-		switch(sk->sk_state) {
+		switch (sk->sk_state) {
 		case TCP_SYN_RECV:
 			if (acceptable) {
 				tp->copied_seq = tp->rcv_nxt;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index ac4ce48a659..463d2b24d2d 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -246,7 +246,7 @@ kill:
 	if (paws_reject)
 		NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
 
-	if(!th->rst) {
+	if (!th->rst) {
 		/* In this case we must reset the TIMEWAIT timer.
 		 *
 		 * If it is ACKless SYN it may be both old duplicate
@@ -324,7 +324,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 			if (tcp_alloc_md5sig_pool() == NULL)
 				BUG();
 		}
-	} while(0);
+	} while (0);
 #endif
 
 	/* Linkage updates. */
@@ -438,7 +438,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 					   keepalive_time_when(newtp));
 
 		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
-		if((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
+		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
 			if (sysctl_tcp_fack)
 				newtp->rx_opt.sack_ok |= 2;
 		}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 2a62b55b15f..f19f5fb361b 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -236,7 +236,7 @@ static u16 tcp_select_window(struct sock *sk)
 	u32 new_win = __tcp_select_window(sk);
 
 	/* Never shrink the offered window */
-	if(new_win < cur_win) {
+	if (new_win < cur_win) {
 		/* Danger Will Robinson!
 		 * Don't update rcv_wup/rcv_wnd here or else
 		 * we will not be able to advertise a zero
@@ -287,10 +287,12 @@ static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
 			       (TCPOPT_SACK << 8) |
 			       (TCPOLEN_SACK_BASE + (tp->rx_opt.eff_sacks *
 						     TCPOLEN_SACK_PERBLOCK)));
-		for(this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
+
+		for (this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
 			*ptr++ = htonl(sp[this_sack].start_seq);
 			*ptr++ = htonl(sp[this_sack].end_seq);
 		}
+
 		if (tp->rx_opt.dsack) {
 			tp->rx_opt.dsack = 0;
 			tp->rx_opt.eff_sacks--;
@@ -335,7 +337,7 @@ static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
 	 */
 	*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
 	if (ts) {
-		if(sack)
+		if (sack)
 			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
 				       (TCPOLEN_SACK_PERM << 16) |
 				       (TCPOPT_TIMESTAMP << 8) |
@@ -347,7 +349,7 @@ static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
 				       TCPOLEN_TIMESTAMP);
 		*ptr++ = htonl(tstamp);		/* TSVAL */
 		*ptr++ = htonl(ts_recent);	/* TSECR */
-	} else if(sack)
+	} else if (sack)
 		*ptr++ = htonl((TCPOPT_NOP << 24) |
 			       (TCPOPT_NOP << 16) |
 			       (TCPOPT_SACK_PERM << 8) |
@@ -428,7 +430,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	sysctl_flags = 0;
 	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
 		tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS;
-		if(sysctl_tcp_timestamps) {
+		if (sysctl_tcp_timestamps) {
 			tcp_header_size += TCPOLEN_TSTAMP_ALIGNED;
 			sysctl_flags |= SYSCTL_FLAG_TSTAMPS;
 		}
@@ -1618,7 +1620,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
 		u16 flags = TCP_SKB_CB(skb)->flags;
 
 		/* Also punt if next skb has been SACK'd. */
-		if(TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED)
+		if (TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED)
 			return;
 
 		/* Next skb is out of window. */
@@ -1778,13 +1780,13 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	}
 
 	/* Collapse two adjacent packets if worthwhile and we can. */
-	if(!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
+	if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
 	   (skb->len < (cur_mss >> 1)) &&
 	   (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) &&
 	   (!tcp_skb_is_last(sk, skb)) &&
 	   (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) &&
 	   (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(tcp_write_queue_next(sk, skb)) == 1) &&
 	   (sysctl_tcp_retrans_collapse != 0))
 		tcp_retrans_try_collapse(sk, skb, cur_mss);
 
 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
@@ -1794,9 +1796,9 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	 * retransmit when old data is attached. So strip it off
 	 * since it is cheap to do so and saves bytes on the network.
 	 */
-	if(skb->len > 0 &&
+	if (skb->len > 0 &&
 	   (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
 	   tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
 		if (!pskb_trim(skb, 0)) {
 			TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
 			skb_shinfo(skb)->gso_segs = 1;
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index 4e1b61032a9..1f91aeae10a 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -226,7 +226,7 @@ static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct westwood *w = inet_csk_ca(sk);
 
-	switch(event) {
+	switch (event) {
 	case CA_EVENT_FAST_ACK:
 		westwood_fast_bw(sk);
 		break;