Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--   include/net/tcp.h   402
1 file changed, 98 insertions(+), 304 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f730935b824a..5010f0c5a56e 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -505,25 +505,6 @@ static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
 #else
 # define TCP_TW_RECYCLE_TICK (12+2-TCP_TW_RECYCLE_SLOTS_LOG)
 #endif
-
-#define BICTCP_BETA_SCALE    1024       /* Scale factor beta calculation
-                                         * max_cwnd = snd_cwnd * beta
-                                         */
-#define BICTCP_MAX_INCREMENT 32         /*
-                                         * Limit on the amount of
-                                         * increment allowed during
-                                         * binary search.
-                                         */
-#define BICTCP_FUNC_OF_MIN_INCR 11      /*
-                                         * log(B/Smin)/log(B/(B-1))+1,
-                                         * Smin:min increment
-                                         * B:log factor
-                                         */
-#define BICTCP_B                4       /*
-                                         * In binary search,
-                                         * go to point (max+min)/N
-                                         */
-
 /*
  * TCP option
  */
@@ -596,16 +577,7 @@ extern int sysctl_tcp_adv_win_scale;
 extern int sysctl_tcp_tw_reuse;
 extern int sysctl_tcp_frto;
 extern int sysctl_tcp_low_latency;
-extern int sysctl_tcp_westwood;
-extern int sysctl_tcp_vegas_cong_avoid;
-extern int sysctl_tcp_vegas_alpha;
-extern int sysctl_tcp_vegas_beta;
-extern int sysctl_tcp_vegas_gamma;
 extern int sysctl_tcp_nometrics_save;
-extern int sysctl_tcp_bic;
-extern int sysctl_tcp_bic_fast_convergence;
-extern int sysctl_tcp_bic_low_window;
-extern int sysctl_tcp_bic_beta;
 extern int sysctl_tcp_moderate_rcvbuf;
 extern int sysctl_tcp_tso_win_divisor;
 
@@ -749,11 +721,16 @@ static inline int tcp_ack_scheduled(struct tcp_sock *tp)
         return tp->ack.pending&TCP_ACK_SCHED;
 }
 
-static __inline__ void tcp_dec_quickack_mode(struct tcp_sock *tp)
+static __inline__ void tcp_dec_quickack_mode(struct tcp_sock *tp, unsigned int pkts)
 {
-        if (tp->ack.quick && --tp->ack.quick == 0) {
-                /* Leaving quickack mode we deflate ATO. */
-                tp->ack.ato = TCP_ATO_MIN;
+        if (tp->ack.quick) {
+                if (pkts >= tp->ack.quick) {
+                        tp->ack.quick = 0;
+
+                        /* Leaving quickack mode we deflate ATO. */
+                        tp->ack.ato = TCP_ATO_MIN;
+                } else
+                        tp->ack.quick -= pkts;
         }
 }
 
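The widened tcp_dec_quickack_mode() lets one call retire several quick-ACK credits at once, which matters for TSO frames that stand for more than one segment. A hypothetical caller sketch (not taken from this patch; the function name and placement are assumptions) would pass the segment count reported by tcp_skb_pcount():

/* Hypothetical sketch: retire one quick-ACK credit per segment carried
 * by the skb that just went out, instead of one credit per call.
 */
static inline void example_ack_sent(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        tcp_dec_quickack_mode(tp, tcp_skb_pcount(skb));
}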
@@ -871,7 +848,9 @@ extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
 
 /* tcp_output.c */
 
-extern int tcp_write_xmit(struct sock *, int nonagle);
+extern void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
+                                      unsigned int cur_mss, int nonagle);
+extern int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp);
 extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
 extern void tcp_xmit_retransmit_queue(struct sock *);
 extern void tcp_simple_retransmit(struct sock *);
@@ -881,12 +860,16 @@ extern void tcp_send_probe0(struct sock *);
 extern void tcp_send_partial(struct sock *);
 extern int tcp_write_wakeup(struct sock *);
 extern void tcp_send_fin(struct sock *sk);
-extern void tcp_send_active_reset(struct sock *sk, int priority);
+extern void tcp_send_active_reset(struct sock *sk,
+                                  unsigned int __nocast priority);
 extern int tcp_send_synack(struct sock *);
-extern void tcp_push_one(struct sock *, unsigned mss_now);
+extern void tcp_push_one(struct sock *, unsigned int mss_now);
 extern void tcp_send_ack(struct sock *sk);
 extern void tcp_send_delayed_ack(struct sock *sk);
 
+/* tcp_input.c */
+extern void tcp_cwnd_application_limited(struct sock *sk);
+
 /* tcp_timer.c */
 extern void tcp_init_xmit_timers(struct sock *);
 extern void tcp_clear_xmit_timers(struct sock *);
@@ -986,7 +969,7 @@ static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long
 static inline void tcp_initialize_rcv_mss(struct sock *sk)
 {
         struct tcp_sock *tp = tcp_sk(sk);
-        unsigned int hint = min(tp->advmss, tp->mss_cache_std);
+        unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
 
         hint = min(hint, tp->rcv_wnd/2);
         hint = min(hint, TCP_MIN_RCVMSS);
@@ -1009,7 +992,7 @@ static __inline__ void tcp_fast_path_on(struct tcp_sock *tp)
 
 static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp)
 {
-        if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
+        if (skb_queue_empty(&tp->out_of_order_queue) &&
             tp->rcv_wnd &&
             atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
             !tp->urg_data)
@@ -1136,6 +1119,82 @@ static inline void tcp_packets_out_dec(struct tcp_sock *tp,
         tp->packets_out -= tcp_skb_pcount(skb);
 }
 
+/* Events passed to congestion control interface */
+enum tcp_ca_event {
+        CA_EVENT_TX_START,      /* first transmit when no packets in flight */
+        CA_EVENT_CWND_RESTART,  /* congestion window restart */
+        CA_EVENT_COMPLETE_CWR,  /* end of congestion recovery */
+        CA_EVENT_FRTO,          /* fast recovery timeout */
+        CA_EVENT_LOSS,          /* loss timeout */
+        CA_EVENT_FAST_ACK,      /* in sequence ack */
+        CA_EVENT_SLOW_ACK,      /* other ack */
+};
+
+/*
+ * Interface for adding new TCP congestion control handlers
+ */
+#define TCP_CA_NAME_MAX 16
+struct tcp_congestion_ops {
+        struct list_head        list;
+
+        /* initialize private data (optional) */
+        void (*init)(struct tcp_sock *tp);
+        /* cleanup private data (optional) */
+        void (*release)(struct tcp_sock *tp);
+
+        /* return slow start threshold (required) */
+        u32 (*ssthresh)(struct tcp_sock *tp);
+        /* lower bound for congestion window (optional) */
+        u32 (*min_cwnd)(struct tcp_sock *tp);
+        /* do new cwnd calculation (required) */
+        void (*cong_avoid)(struct tcp_sock *tp, u32 ack,
+                           u32 rtt, u32 in_flight, int good_ack);
+        /* round trip time sample per acked packet (optional) */
+        void (*rtt_sample)(struct tcp_sock *tp, u32 usrtt);
+        /* call before changing ca_state (optional) */
+        void (*set_state)(struct tcp_sock *tp, u8 new_state);
+        /* call when cwnd event occurs (optional) */
+        void (*cwnd_event)(struct tcp_sock *tp, enum tcp_ca_event ev);
+        /* new value of cwnd after loss (optional) */
+        u32  (*undo_cwnd)(struct tcp_sock *tp);
+        /* hook for packet ack accounting (optional) */
+        void (*pkts_acked)(struct tcp_sock *tp, u32 num_acked);
+        /* get info for tcp_diag (optional) */
+        void (*get_info)(struct tcp_sock *tp, u32 ext, struct sk_buff *skb);
+
+        char            name[TCP_CA_NAME_MAX];
+        struct module   *owner;
+};
+
+extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
+extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
+
+extern void tcp_init_congestion_control(struct tcp_sock *tp);
+extern void tcp_cleanup_congestion_control(struct tcp_sock *tp);
+extern int tcp_set_default_congestion_control(const char *name);
+extern void tcp_get_default_congestion_control(char *name);
+extern int tcp_set_congestion_control(struct tcp_sock *tp, const char *name);
+
+extern struct tcp_congestion_ops tcp_init_congestion_ops;
+extern u32 tcp_reno_ssthresh(struct tcp_sock *tp);
+extern void tcp_reno_cong_avoid(struct tcp_sock *tp, u32 ack,
+                                u32 rtt, u32 in_flight, int flag);
+extern u32 tcp_reno_min_cwnd(struct tcp_sock *tp);
+extern struct tcp_congestion_ops tcp_reno;
+
+static inline void tcp_set_ca_state(struct tcp_sock *tp, u8 ca_state)
+{
+        if (tp->ca_ops->set_state)
+                tp->ca_ops->set_state(tp, ca_state);
+        tp->ca_state = ca_state;
+}
+
+static inline void tcp_ca_event(struct tcp_sock *tp, enum tcp_ca_event event)
+{
+        if (tp->ca_ops->cwnd_event)
+                tp->ca_ops->cwnd_event(tp, event);
+}
+
 /* This determines how many packets are "in the network" to the best
  * of our knowledge. In many cases it is conservative, but where
  * detailed information is available from the receiver (via SACK
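The block above is the whole contract for a pluggable congestion control algorithm: only ssthresh and cong_avoid are mandatory, every other hook may stay NULL. A minimal, hypothetical module sketch built against these declarations (the name "example" and the hook bodies are placeholders, not part of this patch) might look like:

/* Hypothetical out-of-tree module sketch using the new interface. */
#include <linux/module.h>
#include <net/tcp.h>

static u32 example_ssthresh(struct tcp_sock *tp)
{
        /* Reno-style halving, never below two segments. */
        return max(tp->snd_cwnd >> 1U, 2U);
}

static void example_cong_avoid(struct tcp_sock *tp, u32 ack,
                               u32 rtt, u32 in_flight, int good_ack)
{
        /* Placeholder growth policy; a real algorithm would look at
         * in_flight, rtt and good_ack here. */
        if (tp->snd_cwnd < tp->snd_cwnd_clamp)
                tp->snd_cwnd++;
}

static struct tcp_congestion_ops example_ops = {
        .name           = "example",
        .owner          = THIS_MODULE,
        .ssthresh       = example_ssthresh,
        .cong_avoid     = example_cong_avoid,
};

static int __init example_init(void)
{
        return tcp_register_congestion_control(&example_ops);
}

static void __exit example_exit(void)
{
        tcp_unregister_congestion_control(&example_ops);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

A socket would then select the algorithm by name through tcp_set_congestion_control(), or pick up the system default chosen via tcp_set_default_congestion_control().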
@@ -1155,91 +1214,6 @@ static __inline__ unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
         return (tp->packets_out - tp->left_out + tp->retrans_out);
 }
 
-/*
- * Which congestion algorithim is in use on the connection.
- */
-#define tcp_is_vegas(__tp)      ((__tp)->adv_cong == TCP_VEGAS)
-#define tcp_is_westwood(__tp)   ((__tp)->adv_cong == TCP_WESTWOOD)
-#define tcp_is_bic(__tp)        ((__tp)->adv_cong == TCP_BIC)
-
-/* Recalculate snd_ssthresh, we want to set it to:
- *
- * Reno:
- *   one half the current congestion window, but no
- *   less than two segments
- *
- * BIC:
- *   behave like Reno until low_window is reached,
- *   then increase congestion window slowly
- */
-static inline __u32 tcp_recalc_ssthresh(struct tcp_sock *tp)
-{
-        if (tcp_is_bic(tp)) {
-                if (sysctl_tcp_bic_fast_convergence &&
-                    tp->snd_cwnd < tp->bictcp.last_max_cwnd)
-                        tp->bictcp.last_max_cwnd = (tp->snd_cwnd *
-                                                    (BICTCP_BETA_SCALE
-                                                     + sysctl_tcp_bic_beta))
-                                / (2 * BICTCP_BETA_SCALE);
-                else
-                        tp->bictcp.last_max_cwnd = tp->snd_cwnd;
-
-                if (tp->snd_cwnd > sysctl_tcp_bic_low_window)
-                        return max((tp->snd_cwnd * sysctl_tcp_bic_beta)
-                                   / BICTCP_BETA_SCALE, 2U);
-        }
-
-        return max(tp->snd_cwnd >> 1U, 2U);
-}
-
-/* Stop taking Vegas samples for now. */
-#define tcp_vegas_disable(__tp) ((__tp)->vegas.doing_vegas_now = 0)
-
-static inline void tcp_vegas_enable(struct tcp_sock *tp)
-{
-        /* There are several situations when we must "re-start" Vegas:
-         *
-         *  o when a connection is established
-         *  o after an RTO
-         *  o after fast recovery
-         *  o when we send a packet and there is no outstanding
-         *    unacknowledged data (restarting an idle connection)
-         *
-         * In these circumstances we cannot do a Vegas calculation at the
-         * end of the first RTT, because any calculation we do is using
-         * stale info -- both the saved cwnd and congestion feedback are
-         * stale.
-         *
-         * Instead we must wait until the completion of an RTT during
-         * which we actually receive ACKs.
-         */
-
-        /* Begin taking Vegas samples next time we send something. */
-        tp->vegas.doing_vegas_now = 1;
-
-        /* Set the beginning of the next send window. */
-        tp->vegas.beg_snd_nxt = tp->snd_nxt;
-
-        tp->vegas.cntRTT = 0;
-        tp->vegas.minRTT = 0x7fffffff;
-}
-
-/* Should we be taking Vegas samples right now? */
-#define tcp_vegas_enabled(__tp) ((__tp)->vegas.doing_vegas_now)
-
-extern void tcp_ca_init(struct tcp_sock *tp);
-
-static inline void tcp_set_ca_state(struct tcp_sock *tp, u8 ca_state)
-{
-        if (tcp_is_vegas(tp)) {
-                if (ca_state == TCP_CA_Open)
-                        tcp_vegas_enable(tp);
-                else
-                        tcp_vegas_disable(tp);
-        }
-        tp->ca_state = ca_state;
-}
-
 /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
  * The exception is rate halving phase, when cwnd is decreasing towards
  * ssthresh.
@@ -1262,33 +1236,11 @@ static inline void tcp_sync_left_out(struct tcp_sock *tp)
         tp->left_out = tp->sacked_out + tp->lost_out;
 }
 
-extern void tcp_cwnd_application_limited(struct sock *sk);
-
-/* Congestion window validation. (RFC2861) */
-
-static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
-{
-        __u32 packets_out = tp->packets_out;
-
-        if (packets_out >= tp->snd_cwnd) {
-                /* Network is feed fully. */
-                tp->snd_cwnd_used = 0;
-                tp->snd_cwnd_stamp = tcp_time_stamp;
-        } else {
-                /* Network starves. */
-                if (tp->packets_out > tp->snd_cwnd_used)
-                        tp->snd_cwnd_used = tp->packets_out;
-
-                if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto)
-                        tcp_cwnd_application_limited(sk);
-        }
-}
-
-/* Set slow start threshould and cwnd not falling to slow start */
+/* Set slow start threshold and cwnd not falling to slow start */
 static inline void __tcp_enter_cwr(struct tcp_sock *tp)
 {
         tp->undo_marker = 0;
-        tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
+        tp->snd_ssthresh = tp->ca_ops->ssthresh(tp);
         tp->snd_cwnd = min(tp->snd_cwnd,
                            tcp_packets_in_flight(tp) + 1U);
         tp->snd_cwnd_cnt = 0;
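__tcp_enter_cwr() now delegates the threshold computation to the attached algorithm instead of the removed tcp_recalc_ssthresh(). For plain Reno, the declared tcp_reno_ssthresh() presumably reduces to the same arithmetic the removed helper used for non-BIC sockets; a sketch, assuming that equivalence:

u32 tcp_reno_ssthresh(struct tcp_sock *tp)
{
        /* Half the current window, but never less than two segments. */
        return max(tp->snd_cwnd >> 1U, 2U);
}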
@@ -1316,12 +1268,6 @@ static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
         return 3;
 }
 
-static __inline__ int tcp_minshall_check(const struct tcp_sock *tp)
-{
-        return after(tp->snd_sml,tp->snd_una) &&
-                !after(tp->snd_sml, tp->snd_nxt);
-}
-
 static __inline__ void tcp_minshall_update(struct tcp_sock *tp, int mss,
                                            const struct sk_buff *skb)
 {
@@ -1329,122 +1275,18 @@ static __inline__ void tcp_minshall_update(struct tcp_sock *tp, int mss,
                 tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
 }
 
-/* Return 0, if packet can be sent now without violation Nagle's rules:
-   1. It is full sized.
-   2. Or it contains FIN.
-   3. Or TCP_NODELAY was set.
-   4. Or TCP_CORK is not set, and all sent packets are ACKed.
-      With Minshall's modification: all sent small packets are ACKed.
- */
-
-static __inline__ int
-tcp_nagle_check(const struct tcp_sock *tp, const struct sk_buff *skb,
-                unsigned mss_now, int nonagle)
-{
-        return (skb->len < mss_now &&
-                !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
-                ((nonagle&TCP_NAGLE_CORK) ||
-                 (!nonagle &&
-                  tp->packets_out &&
-                  tcp_minshall_check(tp))));
-}
-
-extern void tcp_set_skb_tso_segs(struct sock *, struct sk_buff *);
-
-/* This checks if the data bearing packet SKB (usually sk->sk_send_head)
- * should be put on the wire right now.
- */
-static __inline__ int tcp_snd_test(struct sock *sk,
-                                   struct sk_buff *skb,
-                                   unsigned cur_mss, int nonagle)
-{
-        struct tcp_sock *tp = tcp_sk(sk);
-        int pkts = tcp_skb_pcount(skb);
-
-        if (!pkts) {
-                tcp_set_skb_tso_segs(sk, skb);
-                pkts = tcp_skb_pcount(skb);
-        }
-
-        /* RFC 1122 - section 4.2.3.4
-         *
-         * We must queue if
-         *
-         * a) The right edge of this frame exceeds the window
-         * b) There are packets in flight and we have a small segment
-         *    [SWS avoidance and Nagle algorithm]
-         *    (part of SWS is done on packetization)
-         *    Minshall version sounds: there are no _small_
-         *    segments in flight. (tcp_nagle_check)
-         * c) We have too many packets 'in flight'
-         *
-         * Don't use the nagle rule for urgent data (or
-         * for the final FIN -DaveM).
-         *
-         * Also, Nagle rule does not apply to frames, which
-         * sit in the middle of queue (they have no chances
-         * to get new data) and if room at tail of skb is
-         * not enough to save something seriously (<32 for now).
-         */
-
-        /* Don't be strict about the congestion window for the
-         * final FIN frame.  -DaveM
-         */
-        return (((nonagle&TCP_NAGLE_PUSH) || tp->urg_mode
-                 || !tcp_nagle_check(tp, skb, cur_mss, nonagle)) &&
-                (((tcp_packets_in_flight(tp) + (pkts-1)) < tp->snd_cwnd) ||
-                 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) &&
-                !after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd));
-}
-
 static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
 {
         if (!tp->packets_out && !tp->pending)
                 tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto);
 }
 
-static __inline__ int tcp_skb_is_last(const struct sock *sk,
-                                      const struct sk_buff *skb)
-{
-        return skb->next == (struct sk_buff *)&sk->sk_write_queue;
-}
-
-/* Push out any pending frames which were held back due to
- * TCP_CORK or attempt at coalescing tiny packets.
- * The socket must be locked by the caller.
- */
-static __inline__ void __tcp_push_pending_frames(struct sock *sk,
-                                                 struct tcp_sock *tp,
-                                                 unsigned cur_mss,
-                                                 int nonagle)
-{
-        struct sk_buff *skb = sk->sk_send_head;
-
-        if (skb) {
-                if (!tcp_skb_is_last(sk, skb))
-                        nonagle = TCP_NAGLE_PUSH;
-                if (!tcp_snd_test(sk, skb, cur_mss, nonagle) ||
-                    tcp_write_xmit(sk, nonagle))
-                        tcp_check_probe_timer(sk, tp);
-        }
-        tcp_cwnd_validate(sk, tp);
-}
-
 static __inline__ void tcp_push_pending_frames(struct sock *sk,
                                                struct tcp_sock *tp)
 {
         __tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle);
 }
 
-static __inline__ int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
-{
-        struct sk_buff *skb = sk->sk_send_head;
-
-        return (skb &&
-                tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
-                             tcp_skb_is_last(sk, skb) ? TCP_NAGLE_PUSH : tp->nonagle));
-}
-
 static __inline__ void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
 {
         tp->snd_wl1 = seq;
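With tcp_snd_test(), tcp_skb_is_last() and the old inline __tcp_push_pending_frames() gone from the header, callers go through the two externs declared earlier (tcp_may_send_now() and __tcp_push_pending_frames()), now implemented in tcp_output.c. A hypothetical caller sketch (not from this patch) showing how the pair is meant to be used together:

/* Hypothetical sketch: try to push pending data using the out-of-line
 * helpers; tcp_current_mss(sk, 1) and tp->nonagle mirror the wrapper
 * kept just above.
 */
static inline void example_try_push(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (tcp_may_send_now(sk, tp))
                __tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1),
                                          tp->nonagle);
}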
@@ -1876,52 +1718,4 @@ struct tcp_iter_state {
 extern int tcp_proc_register(struct tcp_seq_afinfo *afinfo);
 extern void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo);
 
-/* TCP Westwood functions and constants */
-
-#define TCP_WESTWOOD_INIT_RTT  (20*HZ)           /* maybe too conservative?! */
-#define TCP_WESTWOOD_RTT_MIN   (HZ/20)           /* 50ms */
-
-static inline void tcp_westwood_update_rtt(struct tcp_sock *tp, __u32 rtt_seq)
-{
-        if (tcp_is_westwood(tp))
-                tp->westwood.rtt = rtt_seq;
-}
-
-static inline __u32 __tcp_westwood_bw_rttmin(const struct tcp_sock *tp)
-{
-        return max((tp->westwood.bw_est) * (tp->westwood.rtt_min) /
-                   (__u32) (tp->mss_cache_std),
-                   2U);
-}
-
-static inline __u32 tcp_westwood_bw_rttmin(const struct tcp_sock *tp)
-{
-        return tcp_is_westwood(tp) ? __tcp_westwood_bw_rttmin(tp) : 0;
-}
-
-static inline int tcp_westwood_ssthresh(struct tcp_sock *tp)
-{
-        __u32 ssthresh = 0;
-
-        if (tcp_is_westwood(tp)) {
-                ssthresh = __tcp_westwood_bw_rttmin(tp);
-                if (ssthresh)
-                        tp->snd_ssthresh = ssthresh;
-        }
-
-        return (ssthresh != 0);
-}
-
-static inline int tcp_westwood_cwnd(struct tcp_sock *tp)
-{
-        __u32 cwnd = 0;
-
-        if (tcp_is_westwood(tp)) {
-                cwnd = __tcp_westwood_bw_rttmin(tp);
-                if (cwnd)
-                        tp->snd_cwnd = cwnd;
-        }
-
-        return (cwnd != 0);
-}
 #endif /* _TCP_H */