author	Gerrit Renker <gerrit@erg.abdn.ac.uk>	2010-10-27 15:16:28 -0400
committer	David S. Miller <davem@davemloft.net>	2010-10-28 13:27:01 -0400
commit	1c0e0a0569e925220c2948ea9b92fc013895917f (patch)
tree	533f97f037679a7ba92867baf816dda21a3c9f84 /net/dccp
parent	b1fcf55eea541af9efa5d39f5a0d1aec8ceca55d (diff)
dccp ccid-2: Stop polling
This updates CCID-2 to use the CCID dequeuing mechanism, converting it from the previous continuous polling to an event-driven mechanism.

Signed-off-by: Gerrit Renker <gerrit@erg.abdn.ac.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/dccp')
-rw-r--r--	net/dccp/ccids/ccid2.c	23
-rw-r--r--	net/dccp/ccids/ccid2.h	5
2 files changed, 20 insertions(+), 8 deletions(-)
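Before the diff itself, a minimal user-space sketch of the pattern this patch adopts: the CCID send hook reports whether the congestion window currently blocks transmission, and the ACK path wakes the transmit path only when that blocked state clears, instead of the core continuously polling the CCID. The return codes and the ccid2_cwnd_network_limited() predicate mirror the patch below; the tx_sock struct, xmit_tasklet(), ack_received() and main() are invented here purely for illustration and are not part of the DCCP stack.

/* Minimal model of the event-driven dequeue pattern; names other than the
 * return codes and the cwnd predicate are illustrative, not kernel API. */
#include <stdbool.h>
#include <stdio.h>

enum ccid_packet_dequeue {		/* return codes of the send hook */
	CCID_PACKET_SEND_AT_ONCE,
	CCID_PACKET_WILL_DEQUEUE_LATER,
};

struct tx_sock {
	unsigned int tx_pipe;		/* packets currently in flight */
	unsigned int tx_cwnd;		/* congestion window */
	unsigned int queued;		/* packets waiting in the tx queue */
};

/* Same predicate the patch adds to ccid2.h. */
static bool cwnd_network_limited(const struct tx_sock *hc)
{
	return hc->tx_pipe >= hc->tx_cwnd;
}

/* The send hook: instead of "return 1, poll me again", the blocked case is
 * reported explicitly so the core can park the queue and stop polling. */
static enum ccid_packet_dequeue tx_send_packet(const struct tx_sock *hc)
{
	if (cwnd_network_limited(hc))
		return CCID_PACKET_WILL_DEQUEUE_LATER;
	return CCID_PACKET_SEND_AT_ONCE;
}

/* Roughly what the xmitlet scheduled via dccps_xmitlet is expected to do:
 * drain whatever the window now permits. */
static void xmit_tasklet(struct tx_sock *hc)
{
	while (hc->queued && tx_send_packet(hc) == CCID_PACKET_SEND_AT_ONCE) {
		hc->queued--;
		hc->tx_pipe++;
		printf("sent one packet, pipe=%u cwnd=%u queued=%u\n",
		       hc->tx_pipe, hc->tx_cwnd, hc->queued);
	}
}

/* Ack processing: mirrors the "sender_was_blocked && !network_limited"
 * check added to ccid2_hc_tx_packet_recv(). */
static void ack_received(struct tx_sock *hc, unsigned int newly_acked)
{
	bool sender_was_blocked = cwnd_network_limited(hc);

	hc->tx_pipe -= newly_acked;	/* acked packets left the network */
	if (sender_was_blocked && !cwnd_network_limited(hc))
		xmit_tasklet(hc);	/* wake the sender only on this edge */
}

int main(void)
{
	struct tx_sock hc = { .tx_pipe = 2, .tx_cwnd = 2, .queued = 3 };

	/* cwnd-limited: the core parks the queue instead of polling */
	if (tx_send_packet(&hc) == CCID_PACKET_WILL_DEQUEUE_LATER)
		printf("blocked, will dequeue later\n");

	ack_received(&hc, 1);		/* an ACK opens the window -> dequeue */
	return 0;
}

The point of the edge check (sender_was_blocked && !cwnd_network_limited()) is that the sender is rescheduled only when an ACK or an RTO actually reopens a previously full window, which is what allows the core to stop polling the CCID for permission to send.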
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index d850e291f87..6576eae9e77 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -78,12 +78,9 @@ static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc)
 
 static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
 {
-	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
-
-	if (hc->tx_pipe < hc->tx_cwnd)
-		return 0;
-
-	return 1; /* XXX CCID should dequeue when ready instead of polling */
+	if (ccid2_cwnd_network_limited(ccid2_hc_tx_sk(sk)))
+		return CCID_PACKET_WILL_DEQUEUE_LATER;
+	return CCID_PACKET_SEND_AT_ONCE;
 }
 
 static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
@@ -115,6 +112,7 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
 {
 	struct sock *sk = (struct sock *)data;
 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
+	const bool sender_was_blocked = ccid2_cwnd_network_limited(hc);
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk)) {
@@ -129,8 +127,6 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
 	if (hc->tx_rto > DCCP_RTO_MAX)
 		hc->tx_rto = DCCP_RTO_MAX;
 
-	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
-
 	/* adjust pipe, cwnd etc */
 	hc->tx_ssthresh = hc->tx_cwnd / 2;
 	if (hc->tx_ssthresh < 2)
@@ -146,6 +142,12 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
 	hc->tx_rpseq = 0;
 	hc->tx_rpdupack = -1;
 	ccid2_change_l_ack_ratio(sk, 1);
+
+	/* if we were blocked before, we may now send cwnd=1 packet */
+	if (sender_was_blocked)
+		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+	/* restart backed-off timer */
+	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
 out:
 	bh_unlock_sock(sk);
 	sock_put(sk);
@@ -434,6 +436,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
+	const bool sender_was_blocked = ccid2_cwnd_network_limited(hc);
 	u64 ackno, seqno;
 	struct ccid2_seq *seqp;
 	unsigned char *vector;
@@ -631,6 +634,10 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 		sk_stop_timer(sk, &hc->tx_rtotimer);
 	else
 		sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
+
+	/* check if incoming Acks allow pending packets to be sent */
+	if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
+		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
 }
 
 static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
index 9731c2dc148..25cb6b216ed 100644
--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h
@@ -81,6 +81,11 @@ struct ccid2_hc_tx_sock {
 	u64			tx_high_ack;
 };
 
+static inline bool ccid2_cwnd_network_limited(struct ccid2_hc_tx_sock *hc)
+{
+	return hc->tx_pipe >= hc->tx_cwnd;
+}
+
 struct ccid2_hc_rx_sock {
 	int	rx_data;
 };