Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--    include/net/tcp.h    399
1 files changed, 109 insertions, 290 deletions
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e71f8ba3e101..ec9e20c27179 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -31,6 +31,7 @@
 #include <linux/cache.h>
 #include <linux/percpu.h>
 #include <net/checksum.h>
+#include <net/request_sock.h>
 #include <net/sock.h>
 #include <net/snmp.h>
 #include <net/ip.h>
@@ -504,25 +505,6 @@ static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
 #else
 # define TCP_TW_RECYCLE_TICK (12+2-TCP_TW_RECYCLE_SLOTS_LOG)
 #endif
-
-#define BICTCP_BETA_SCALE    1024	/* Scale factor beta calculation
-					 * max_cwnd = snd_cwnd * beta
-					 */
-#define BICTCP_MAX_INCREMENT 32		/*
-					 * Limit on the amount of
-					 * increment allowed during
-					 * binary search.
-					 */
-#define BICTCP_FUNC_OF_MIN_INCR 11	/*
-					 * log(B/Smin)/log(B/(B-1))+1,
-					 * Smin:min increment
-					 * B:log factor
-					 */
-#define BICTCP_B		4	/*
-					 * In binary search,
-					 * go to point (max+min)/N
-					 */
-
 /*
  * TCP option
  */
@@ -563,7 +545,6 @@ static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
 #define TCP_NAGLE_PUSH		4	/* Cork is overriden for already queued data */
 
 /* sysctl variables for tcp */
-extern int sysctl_max_syn_backlog;
 extern int sysctl_tcp_timestamps;
 extern int sysctl_tcp_window_scaling;
 extern int sysctl_tcp_sack;
@@ -596,16 +577,7 @@ extern int sysctl_tcp_adv_win_scale;
 extern int sysctl_tcp_tw_reuse;
 extern int sysctl_tcp_frto;
 extern int sysctl_tcp_low_latency;
-extern int sysctl_tcp_westwood;
-extern int sysctl_tcp_vegas_cong_avoid;
-extern int sysctl_tcp_vegas_alpha;
-extern int sysctl_tcp_vegas_beta;
-extern int sysctl_tcp_vegas_gamma;
 extern int sysctl_tcp_nometrics_save;
-extern int sysctl_tcp_bic;
-extern int sysctl_tcp_bic_fast_convergence;
-extern int sysctl_tcp_bic_low_window;
-extern int sysctl_tcp_bic_beta;
 extern int sysctl_tcp_moderate_rcvbuf;
 extern int sysctl_tcp_tso_win_divisor;
 
@@ -613,74 +585,6 @@ extern atomic_t tcp_memory_allocated;
 extern atomic_t tcp_sockets_allocated;
 extern int tcp_memory_pressure;
 
-struct open_request;
-
-struct or_calltable {
-	int  family;
-	int  (*rtx_syn_ack)	(struct sock *sk, struct open_request *req, struct dst_entry*);
-	void (*send_ack)	(struct sk_buff *skb, struct open_request *req);
-	void (*destructor)	(struct open_request *req);
-	void (*send_reset)	(struct sk_buff *skb);
-};
-
-struct tcp_v4_open_req {
-	__u32			loc_addr;
-	__u32			rmt_addr;
-	struct ip_options	*opt;
-};
-
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-struct tcp_v6_open_req {
-	struct in6_addr		loc_addr;
-	struct in6_addr		rmt_addr;
-	struct sk_buff		*pktopts;
-	int			iif;
-};
-#endif
-
-/* this structure is too big */
-struct open_request {
-	struct open_request	*dl_next; /* Must be first member! */
-	__u32			rcv_isn;
-	__u32			snt_isn;
-	__u16			rmt_port;
-	__u16			mss;
-	__u8			retrans;
-	__u8			__pad;
-	__u16			snd_wscale : 4,
-				rcv_wscale : 4,
-				tstamp_ok : 1,
-				sack_ok : 1,
-				wscale_ok : 1,
-				ecn_ok : 1,
-				acked : 1;
-	/* The following two fields can be easily recomputed I think -AK */
-	__u32			window_clamp;	/* window clamp at creation time */
-	__u32			rcv_wnd;	/* rcv_wnd offered first time */
-	__u32			ts_recent;
-	unsigned long		expires;
-	struct or_calltable	*class;
-	struct sock		*sk;
-	union {
-		struct tcp_v4_open_req v4_req;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-		struct tcp_v6_open_req v6_req;
-#endif
-	} af;
-};
-
-/* SLAB cache for open requests. */
-extern kmem_cache_t *tcp_openreq_cachep;
-
-#define tcp_openreq_alloc()		kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC)
-#define tcp_openreq_fastfree(req)	kmem_cache_free(tcp_openreq_cachep, req)
-
-static inline void tcp_openreq_free(struct open_request *req)
-{
-	req->class->destructor(req);
-	tcp_openreq_fastfree(req);
-}
-
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 #define TCP_INET_FAMILY(fam) ((fam) == AF_INET)
 #else
@@ -708,7 +612,7 @@ struct tcp_func {
 
 	struct sock *		(*syn_recv_sock)	(struct sock *sk,
 							 struct sk_buff *skb,
-							 struct open_request *req,
+							 struct request_sock *req,
 							 struct dst_entry *dst);
 
 	int			(*remember_stamp)	(struct sock *sk);
@@ -852,8 +756,8 @@ extern enum tcp_tw_status tcp_timewait_state_process(struct tcp_tw_bucket *tw,
 						       unsigned len);
 
 extern struct sock *		tcp_check_req(struct sock *sk,struct sk_buff *skb,
-					      struct open_request *req,
-					      struct open_request **prev);
+					      struct request_sock *req,
+					      struct request_sock **prev);
 extern int			tcp_child_process(struct sock *parent,
 						  struct sock *child,
 						  struct sk_buff *skb);
@@ -903,12 +807,12 @@ extern int tcp_v4_conn_request(struct sock *sk,
 					       struct sk_buff *skb);
 
 extern struct sock *		tcp_create_openreq_child(struct sock *sk,
-							 struct open_request *req,
+							 struct request_sock *req,
 							 struct sk_buff *skb);
 
 extern struct sock *		tcp_v4_syn_recv_sock(struct sock *sk,
 						     struct sk_buff *skb,
-						     struct open_request *req,
+						     struct request_sock *req,
 						     struct dst_entry *dst);
 
 extern int			tcp_v4_do_rcv(struct sock *sk,
@@ -922,7 +826,7 @@ extern int tcp_connect(struct sock *sk);
 
 extern struct sk_buff *		tcp_make_synack(struct sock *sk,
 						struct dst_entry *dst,
-						struct open_request *req);
+						struct request_sock *req);
 
 extern int			tcp_disconnect(struct sock *sk, int flags);
 
@@ -1204,6 +1108,82 @@ static inline void tcp_packets_out_dec(struct tcp_sock *tp,
 	tp->packets_out -= tcp_skb_pcount(skb);
 }
 
+/* Events passed to congestion control interface */
+enum tcp_ca_event {
+	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
+	CA_EVENT_CWND_RESTART,	/* congestion window restart */
+	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
+	CA_EVENT_FRTO,		/* fast recovery timeout */
+	CA_EVENT_LOSS,		/* loss timeout */
+	CA_EVENT_FAST_ACK,	/* in sequence ack */
+	CA_EVENT_SLOW_ACK,	/* other ack */
+};
+
+/*
+ * Interface for adding new TCP congestion control handlers
+ */
+#define TCP_CA_NAME_MAX	16
+struct tcp_congestion_ops {
+	struct list_head	list;
+
+	/* initialize private data (optional) */
+	void (*init)(struct tcp_sock *tp);
+	/* cleanup private data (optional) */
+	void (*release)(struct tcp_sock *tp);
+
+	/* return slow start threshold (required) */
+	u32 (*ssthresh)(struct tcp_sock *tp);
+	/* lower bound for congestion window (optional) */
+	u32 (*min_cwnd)(struct tcp_sock *tp);
+	/* do new cwnd calculation (required) */
+	void (*cong_avoid)(struct tcp_sock *tp, u32 ack,
+			   u32 rtt, u32 in_flight, int good_ack);
+	/* round trip time sample per acked packet (optional) */
+	void (*rtt_sample)(struct tcp_sock *tp, u32 usrtt);
+	/* call before changing ca_state (optional) */
+	void (*set_state)(struct tcp_sock *tp, u8 new_state);
+	/* call when cwnd event occurs (optional) */
+	void (*cwnd_event)(struct tcp_sock *tp, enum tcp_ca_event ev);
+	/* new value of cwnd after loss (optional) */
+	u32  (*undo_cwnd)(struct tcp_sock *tp);
+	/* hook for packet ack accounting (optional) */
+	void (*pkts_acked)(struct tcp_sock *tp, u32 num_acked);
+	/* get info for tcp_diag (optional) */
+	void (*get_info)(struct tcp_sock *tp, u32 ext, struct sk_buff *skb);
+
+	char		name[TCP_CA_NAME_MAX];
+	struct module	*owner;
+};
+
+extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
+extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
+
+extern void tcp_init_congestion_control(struct tcp_sock *tp);
+extern void tcp_cleanup_congestion_control(struct tcp_sock *tp);
+extern int tcp_set_default_congestion_control(const char *name);
+extern void tcp_get_default_congestion_control(char *name);
+extern int tcp_set_congestion_control(struct tcp_sock *tp, const char *name);
+
+extern struct tcp_congestion_ops tcp_init_congestion_ops;
+extern u32 tcp_reno_ssthresh(struct tcp_sock *tp);
+extern void tcp_reno_cong_avoid(struct tcp_sock *tp, u32 ack,
+				u32 rtt, u32 in_flight, int flag);
+extern u32 tcp_reno_min_cwnd(struct tcp_sock *tp);
+extern struct tcp_congestion_ops tcp_reno;
+
+static inline void tcp_set_ca_state(struct tcp_sock *tp, u8 ca_state)
+{
+	if (tp->ca_ops->set_state)
+		tp->ca_ops->set_state(tp, ca_state);
+	tp->ca_state = ca_state;
+}
+
+static inline void tcp_ca_event(struct tcp_sock *tp, enum tcp_ca_event event)
+{
+	if (tp->ca_ops->cwnd_event)
+		tp->ca_ops->cwnd_event(tp, event);
+}
+
 /* This determines how many packets are "in the network" to the best
  * of our knowledge.  In many cases it is conservative, but where
  * detailed information is available from the receiver (via SACK
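
The hunk above replaces the compiled-in Reno/BIC/Vegas/Westwood logic with a pluggable congestion control interface. As a rough illustration of how something would hook into it, here is a minimal sketch of a module that simply reuses the Reno helpers exported by this header; the ops fields, the tcp_reno_* helpers and the register/unregister calls are taken from the hunk above, while the module name and everything else about it is hypothetical:

/* Hypothetical example module -- illustrative sketch only.  The ops
 * fields and the tcp_reno_*()/tcp_register_congestion_control() calls
 * come from the hunk above; the module itself is not part of the patch.
 */
#include <linux/module.h>
#include <net/tcp.h>

static struct tcp_congestion_ops tcp_example = {
	.ssthresh	= tcp_reno_ssthresh,	/* required hook */
	.cong_avoid	= tcp_reno_cong_avoid,	/* required hook */
	.min_cwnd	= tcp_reno_min_cwnd,	/* optional hook */
	.name		= "example",
	.owner		= THIS_MODULE,
};

static int __init tcp_example_init(void)
{
	/* Adds the algorithm to the global list so it can be selected
	 * by name (e.g. via tcp_set_default_congestion_control()). */
	return tcp_register_congestion_control(&tcp_example);
}

static void __exit tcp_example_exit(void)
{
	tcp_unregister_congestion_control(&tcp_example);
}

module_init(tcp_example_init);
module_exit(tcp_example_exit);
MODULE_LICENSE("GPL");
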
@@ -1223,91 +1203,6 @@ static __inline__ unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
 	return (tp->packets_out - tp->left_out + tp->retrans_out);
 }
 
-/*
- * Which congestion algorithim is in use on the connection.
- */
-#define tcp_is_vegas(__tp)	((__tp)->adv_cong == TCP_VEGAS)
-#define tcp_is_westwood(__tp)	((__tp)->adv_cong == TCP_WESTWOOD)
-#define tcp_is_bic(__tp)	((__tp)->adv_cong == TCP_BIC)
-
-/* Recalculate snd_ssthresh, we want to set it to:
- *
- * Reno:
- *   one half the current congestion window, but no
- *   less than two segments
- *
- * BIC:
- *   behave like Reno until low_window is reached,
- *   then increase congestion window slowly
- */
-static inline __u32 tcp_recalc_ssthresh(struct tcp_sock *tp)
-{
-	if (tcp_is_bic(tp)) {
-		if (sysctl_tcp_bic_fast_convergence &&
-		    tp->snd_cwnd < tp->bictcp.last_max_cwnd)
-			tp->bictcp.last_max_cwnd = (tp->snd_cwnd *
-						    (BICTCP_BETA_SCALE
-						     + sysctl_tcp_bic_beta))
-				/ (2 * BICTCP_BETA_SCALE);
-		else
-			tp->bictcp.last_max_cwnd = tp->snd_cwnd;
-
-		if (tp->snd_cwnd > sysctl_tcp_bic_low_window)
-			return max((tp->snd_cwnd * sysctl_tcp_bic_beta)
-				   / BICTCP_BETA_SCALE, 2U);
-	}
-
-	return max(tp->snd_cwnd >> 1U, 2U);
-}
-
-/* Stop taking Vegas samples for now. */
-#define tcp_vegas_disable(__tp)	((__tp)->vegas.doing_vegas_now = 0)
-
-static inline void tcp_vegas_enable(struct tcp_sock *tp)
-{
-	/* There are several situations when we must "re-start" Vegas:
-	 *
-	 *  o when a connection is established
-	 *  o after an RTO
-	 *  o after fast recovery
-	 *  o when we send a packet and there is no outstanding
-	 *    unacknowledged data (restarting an idle connection)
-	 *
-	 * In these circumstances we cannot do a Vegas calculation at the
-	 * end of the first RTT, because any calculation we do is using
-	 * stale info -- both the saved cwnd and congestion feedback are
-	 * stale.
-	 *
-	 * Instead we must wait until the completion of an RTT during
-	 * which we actually receive ACKs.
-	 */
-
-	/* Begin taking Vegas samples next time we send something. */
-	tp->vegas.doing_vegas_now = 1;
-
-	/* Set the beginning of the next send window. */
-	tp->vegas.beg_snd_nxt = tp->snd_nxt;
-
-	tp->vegas.cntRTT = 0;
-	tp->vegas.minRTT = 0x7fffffff;
-}
-
-/* Should we be taking Vegas samples right now? */
-#define tcp_vegas_enabled(__tp)	((__tp)->vegas.doing_vegas_now)
-
-extern void tcp_ca_init(struct tcp_sock *tp);
-
-static inline void tcp_set_ca_state(struct tcp_sock *tp, u8 ca_state)
-{
-	if (tcp_is_vegas(tp)) {
-		if (ca_state == TCP_CA_Open)
-			tcp_vegas_enable(tp);
-		else
-			tcp_vegas_disable(tp);
-	}
-	tp->ca_state = ca_state;
-}
-
 /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
  * The exception is rate halving phase, when cwnd is decreasing towards
  * ssthresh.
@@ -1356,7 +1251,7 @@ static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
 static inline void __tcp_enter_cwr(struct tcp_sock *tp)
 {
 	tp->undo_marker = 0;
-	tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
+	tp->snd_ssthresh = tp->ca_ops->ssthresh(tp);
 	tp->snd_cwnd = min(tp->snd_cwnd,
 			   tcp_packets_in_flight(tp) + 1U);
 	tp->snd_cwnd_cnt = 0;
@@ -1750,99 +1645,71 @@ static inline int tcp_full_space(const struct sock *sk)
 	return tcp_win_from_space(sk->sk_rcvbuf);
 }
 
-static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
+static inline void tcp_acceptq_queue(struct sock *sk, struct request_sock *req,
 					 struct sock *child)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	req->sk = child;
-	sk_acceptq_added(sk);
-
-	if (!tp->accept_queue_tail) {
-		tp->accept_queue = req;
-	} else {
-		tp->accept_queue_tail->dl_next = req;
-	}
-	tp->accept_queue_tail = req;
-	req->dl_next = NULL;
+	reqsk_queue_add(&tcp_sk(sk)->accept_queue, req, sk, child);
 }
 
-struct tcp_listen_opt
-{
-	u8			max_qlen_log;	/* log_2 of maximal queued SYNs */
-	int			qlen;
-	int			qlen_young;
-	int			clock_hand;
-	u32			hash_rnd;
-	struct open_request	*syn_table[TCP_SYNQ_HSIZE];
-};
-
 static inline void
-tcp_synq_removed(struct sock *sk, struct open_request *req)
+tcp_synq_removed(struct sock *sk, struct request_sock *req)
 {
-	struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
-
-	if (--lopt->qlen == 0)
+	if (reqsk_queue_removed(&tcp_sk(sk)->accept_queue, req) == 0)
 		tcp_delete_keepalive_timer(sk);
-	if (req->retrans == 0)
-		lopt->qlen_young--;
 }
 
 static inline void tcp_synq_added(struct sock *sk)
 {
-	struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
-
-	if (lopt->qlen++ == 0)
+	if (reqsk_queue_added(&tcp_sk(sk)->accept_queue) == 0)
 		tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
-	lopt->qlen_young++;
 }
 
 static inline int tcp_synq_len(struct sock *sk)
 {
-	return tcp_sk(sk)->listen_opt->qlen;
+	return reqsk_queue_len(&tcp_sk(sk)->accept_queue);
 }
 
 static inline int tcp_synq_young(struct sock *sk)
 {
-	return tcp_sk(sk)->listen_opt->qlen_young;
+	return reqsk_queue_len_young(&tcp_sk(sk)->accept_queue);
 }
 
 static inline int tcp_synq_is_full(struct sock *sk)
 {
-	return tcp_synq_len(sk) >> tcp_sk(sk)->listen_opt->max_qlen_log;
+	return reqsk_queue_is_full(&tcp_sk(sk)->accept_queue);
 }
 
-static inline void tcp_synq_unlink(struct tcp_sock *tp, struct open_request *req,
-				   struct open_request **prev)
+static inline void tcp_synq_unlink(struct tcp_sock *tp, struct request_sock *req,
+				   struct request_sock **prev)
 {
-	write_lock(&tp->syn_wait_lock);
-	*prev = req->dl_next;
-	write_unlock(&tp->syn_wait_lock);
+	reqsk_queue_unlink(&tp->accept_queue, req, prev);
 }
 
-static inline void tcp_synq_drop(struct sock *sk, struct open_request *req,
-				 struct open_request **prev)
+static inline void tcp_synq_drop(struct sock *sk, struct request_sock *req,
+				 struct request_sock **prev)
 {
 	tcp_synq_unlink(tcp_sk(sk), req, prev);
 	tcp_synq_removed(sk, req);
-	tcp_openreq_free(req);
+	reqsk_free(req);
 }
 
-static __inline__ void tcp_openreq_init(struct open_request *req,
+static __inline__ void tcp_openreq_init(struct request_sock *req,
 					struct tcp_options_received *rx_opt,
 					struct sk_buff *skb)
 {
+	struct inet_request_sock *ireq = inet_rsk(req);
+
 	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
-	req->rcv_isn = TCP_SKB_CB(skb)->seq;
+	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
 	req->mss = rx_opt->mss_clamp;
 	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
-	req->tstamp_ok = rx_opt->tstamp_ok;
-	req->sack_ok = rx_opt->sack_ok;
-	req->snd_wscale = rx_opt->snd_wscale;
-	req->wscale_ok = rx_opt->wscale_ok;
-	req->acked = 0;
-	req->ecn_ok = 0;
-	req->rmt_port = skb->h.th->source;
+	ireq->tstamp_ok = rx_opt->tstamp_ok;
+	ireq->sack_ok = rx_opt->sack_ok;
+	ireq->snd_wscale = rx_opt->snd_wscale;
+	ireq->wscale_ok = rx_opt->wscale_ok;
+	ireq->acked = 0;
+	ireq->ecn_ok = 0;
+	ireq->rmt_port = skb->h.th->source;
 }
 
 extern void tcp_enter_memory_pressure(void);
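
The listen-queue hunk above routes the old tcp_listen_opt/open_request bookkeeping through the generic request_sock accept queue from the newly included <net/request_sock.h>. Below is a rough sketch of how a listener's SYN path would use the converted helpers; reqsk_alloc() and tcp_request_sock_ops are assumed to be supplied by the new request_sock infrastructure (they are not shown in this diff), while tcp_synq_is_full(), tcp_openreq_init() and tcp_synq_added() are the inlines defined above:

/* Illustrative only -- a skeletal SYN handler built from the helpers
 * above.  reqsk_alloc() and tcp_request_sock_ops are assumptions based
 * on <net/request_sock.h>; they do not appear in this diff.
 */
#include <linux/string.h>
#include <net/tcp.h>

static int example_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_options_received tmp_opt;
	struct request_sock *req;

	/* Per-listener SYN backlog already full: drop the SYN. */
	if (tcp_synq_is_full(sk))
		goto drop;

	req = reqsk_alloc(&tcp_request_sock_ops);	/* assumed allocator */
	if (req == NULL)
		goto drop;

	memset(&tmp_opt, 0, sizeof(tmp_opt));
	/* ... parse TCP options from skb into tmp_opt ... */

	/* Fills tcp_rsk(req)->rcv_isn and the inet_rsk(req) option bits. */
	tcp_openreq_init(req, &tmp_opt, skb);

	/* ... send the SYN-ACK and hash req into the listener's queue ... */

	tcp_synq_added(sk);	/* arms the SYN-ACK timer on the first entry */
	return 0;

drop:
	return 0;	/* listeners silently drop, as in tcp_v4_conn_request() */
}
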
@@ -1972,52 +1839,4 @@ struct tcp_iter_state {
 extern int tcp_proc_register(struct tcp_seq_afinfo *afinfo);
 extern void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo);
 
-/* TCP Westwood functions and constants */
-
-#define TCP_WESTWOOD_INIT_RTT               (20*HZ) /* maybe too conservative?! */
-#define TCP_WESTWOOD_RTT_MIN                (HZ/20) /* 50ms */
-
-static inline void tcp_westwood_update_rtt(struct tcp_sock *tp, __u32 rtt_seq)
-{
-	if (tcp_is_westwood(tp))
-		tp->westwood.rtt = rtt_seq;
-}
-
-static inline __u32 __tcp_westwood_bw_rttmin(const struct tcp_sock *tp)
-{
-	return max((tp->westwood.bw_est) * (tp->westwood.rtt_min) /
-		   (__u32) (tp->mss_cache_std),
-		   2U);
-}
-
-static inline __u32 tcp_westwood_bw_rttmin(const struct tcp_sock *tp)
-{
-	return tcp_is_westwood(tp) ? __tcp_westwood_bw_rttmin(tp) : 0;
-}
-
-static inline int tcp_westwood_ssthresh(struct tcp_sock *tp)
-{
-	__u32 ssthresh = 0;
-
-	if (tcp_is_westwood(tp)) {
-		ssthresh = __tcp_westwood_bw_rttmin(tp);
-		if (ssthresh)
-			tp->snd_ssthresh = ssthresh;
-	}
-
-	return (ssthresh != 0);
-}
-
-static inline int tcp_westwood_cwnd(struct tcp_sock *tp)
-{
-	__u32 cwnd = 0;
-
-	if (tcp_is_westwood(tp)) {
-		cwnd = __tcp_westwood_bw_rttmin(tp);
-		if (cwnd)
-			tp->snd_cwnd = cwnd;
-	}
-
-	return (cwnd != 0);
-}
 #endif	/* _TCP_H */