Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--	include/net/tcp.h	74	++++++++++++++++++++++++++++++++++++++++++--------------------------------
1 file changed, 42 insertions(+), 32 deletions(-)
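
This patch moves the TCP congestion control state out of struct tcp_sock and into struct inet_connection_sock: the ca_ops and ca_state members become icsk_ca_ops and icsk_ca_state, and every hook in struct tcp_congestion_ops, along with the related helpers and Reno declarations, now takes a struct sock * instead of a struct tcp_sock *. The helpers reach the congestion state through inet_csk(sk) and recover TCP-private fields with tcp_sk(sk) where still needed. A sketch of a module written against the new hook signatures follows the diff.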
diff --git a/include/net/tcp.h b/include/net/tcp.h
index d489ac548e4b..0b3f7294c5c7 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -669,29 +669,29 @@ struct tcp_congestion_ops {
 	struct list_head	list;
 
 	/* initialize private data (optional) */
-	void (*init)(struct tcp_sock *tp);
+	void (*init)(struct sock *sk);
 	/* cleanup private data (optional) */
-	void (*release)(struct tcp_sock *tp);
+	void (*release)(struct sock *sk);
 
 	/* return slow start threshold (required) */
-	u32 (*ssthresh)(struct tcp_sock *tp);
+	u32 (*ssthresh)(struct sock *sk);
 	/* lower bound for congestion window (optional) */
-	u32 (*min_cwnd)(struct tcp_sock *tp);
+	u32 (*min_cwnd)(struct sock *sk);
 	/* do new cwnd calculation (required) */
-	void (*cong_avoid)(struct tcp_sock *tp, u32 ack,
+	void (*cong_avoid)(struct sock *sk, u32 ack,
 			   u32 rtt, u32 in_flight, int good_ack);
 	/* round trip time sample per acked packet (optional) */
-	void (*rtt_sample)(struct tcp_sock *tp, u32 usrtt);
+	void (*rtt_sample)(struct sock *sk, u32 usrtt);
 	/* call before changing ca_state (optional) */
-	void (*set_state)(struct tcp_sock *tp, u8 new_state);
+	void (*set_state)(struct sock *sk, u8 new_state);
 	/* call when cwnd event occurs (optional) */
-	void (*cwnd_event)(struct tcp_sock *tp, enum tcp_ca_event ev);
+	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
 	/* new value of cwnd after loss (optional) */
-	u32 (*undo_cwnd)(struct tcp_sock *tp);
+	u32 (*undo_cwnd)(struct sock *sk);
 	/* hook for packet ack accounting (optional) */
-	void (*pkts_acked)(struct tcp_sock *tp, u32 num_acked);
+	void (*pkts_acked)(struct sock *sk, u32 num_acked);
 	/* get info for tcp_diag (optional) */
-	void (*get_info)(struct tcp_sock *tp, u32 ext, struct sk_buff *skb);
+	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);
 
 	char		name[TCP_CA_NAME_MAX];
 	struct module	*owner;
@@ -700,30 +700,34 @@ struct tcp_congestion_ops {
 extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
 extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
 
-extern void tcp_init_congestion_control(struct tcp_sock *tp);
-extern void tcp_cleanup_congestion_control(struct tcp_sock *tp);
+extern void tcp_init_congestion_control(struct sock *sk);
+extern void tcp_cleanup_congestion_control(struct sock *sk);
 extern int tcp_set_default_congestion_control(const char *name);
 extern void tcp_get_default_congestion_control(char *name);
-extern int tcp_set_congestion_control(struct tcp_sock *tp, const char *name);
+extern int tcp_set_congestion_control(struct sock *sk, const char *name);
 
 extern struct tcp_congestion_ops tcp_init_congestion_ops;
-extern u32 tcp_reno_ssthresh(struct tcp_sock *tp);
-extern void tcp_reno_cong_avoid(struct tcp_sock *tp, u32 ack,
+extern u32 tcp_reno_ssthresh(struct sock *sk);
+extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack,
 				u32 rtt, u32 in_flight, int flag);
-extern u32 tcp_reno_min_cwnd(struct tcp_sock *tp);
+extern u32 tcp_reno_min_cwnd(struct sock *sk);
 extern struct tcp_congestion_ops tcp_reno;
 
-static inline void tcp_set_ca_state(struct tcp_sock *tp, u8 ca_state)
+static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
 {
-	if (tp->ca_ops->set_state)
-		tp->ca_ops->set_state(tp, ca_state);
-	tp->ca_state = ca_state;
+	struct inet_connection_sock *icsk = inet_csk(sk);
+
+	if (icsk->icsk_ca_ops->set_state)
+		icsk->icsk_ca_ops->set_state(sk, ca_state);
+	icsk->icsk_ca_state = ca_state;
 }
 
-static inline void tcp_ca_event(struct tcp_sock *tp, enum tcp_ca_event event)
+static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
 {
-	if (tp->ca_ops->cwnd_event)
-		tp->ca_ops->cwnd_event(tp, event);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
+
+	if (icsk->icsk_ca_ops->cwnd_event)
+		icsk->icsk_ca_ops->cwnd_event(sk, event);
 }
 
 /* This determines how many packets are "in the network" to the best
@@ -749,9 +753,10 @@ static __inline__ unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
  * The exception is rate halving phase, when cwnd is decreasing towards
  * ssthresh.
  */
-static inline __u32 tcp_current_ssthresh(struct tcp_sock *tp)
+static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 {
-	if ((1<<tp->ca_state)&(TCPF_CA_CWR|TCPF_CA_Recovery))
+	const struct tcp_sock *tp = tcp_sk(sk);
+	if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
 		return tp->snd_ssthresh;
 	else
 		return max(tp->snd_ssthresh,
@@ -768,10 +773,13 @@ static inline void tcp_sync_left_out(struct tcp_sock *tp)
 }
 
 /* Set slow start threshold and cwnd not falling to slow start */
-static inline void __tcp_enter_cwr(struct tcp_sock *tp)
+static inline void __tcp_enter_cwr(struct sock *sk)
 {
+	const struct inet_connection_sock *icsk = inet_csk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	tp->undo_marker = 0;
-	tp->snd_ssthresh = tp->ca_ops->ssthresh(tp);
+	tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
 	tp->snd_cwnd = min(tp->snd_cwnd,
 			   tcp_packets_in_flight(tp) + 1U);
 	tp->snd_cwnd_cnt = 0;
@@ -780,12 +788,14 @@ static inline void __tcp_enter_cwr(struct tcp_sock *tp)
 	TCP_ECN_queue_cwr(tp);
 }
 
-static inline void tcp_enter_cwr(struct tcp_sock *tp)
+static inline void tcp_enter_cwr(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	tp->prior_ssthresh = 0;
-	if (tp->ca_state < TCP_CA_CWR) {
-		__tcp_enter_cwr(tp);
-		tcp_set_ca_state(tp, TCP_CA_CWR);
+	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
+		__tcp_enter_cwr(sk);
+		tcp_set_ca_state(sk, TCP_CA_CWR);
 	}
 }
 
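
For illustration, here is a minimal sketch of a congestion control module written against the converted struct sock * hooks. It is not part of this patch: the name "mycc" and its functions are hypothetical, and it leans only on symbols this header declares (tcp_reno_cong_avoid, tcp_reno_min_cwnd, tcp_register_congestion_control) plus the standard module macros.

#include <linux/module.h>
#include <net/tcp.h>

/* Classic Reno response to loss: halve cwnd, never below 2 segments. */
static u32 mycc_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd >> 1U, 2U);
}

/* Window growth is delegated to the Reno helper declared in this header. */
static void mycc_cong_avoid(struct sock *sk, u32 ack,
			    u32 rtt, u32 in_flight, int good_ack)
{
	tcp_reno_cong_avoid(sk, ack, rtt, in_flight, good_ack);
}

static struct tcp_congestion_ops mycc = {
	.ssthresh	= mycc_ssthresh,	/* required */
	.cong_avoid	= mycc_cong_avoid,	/* required */
	.min_cwnd	= tcp_reno_min_cwnd,	/* optional */
	.owner		= THIS_MODULE,
	.name		= "mycc",
};

static int __init mycc_register(void)
{
	return tcp_register_congestion_control(&mycc);
}

static void __exit mycc_unregister(void)
{
	tcp_unregister_congestion_control(&mycc);
}

module_init(mycc_register);
module_exit(mycc_unregister);
MODULE_LICENSE("GPL");

Once registered, the algorithm becomes selectable system-wide through tcp_set_default_congestion_control("mycc") or per socket through tcp_set_congestion_control(), both declared in the hunks above.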