author    Gerrit Renker <gerrit@erg.abdn.ac.uk>    2008-09-04 01:30:19 -0400
committer Gerrit Renker <gerrit@erg.abdn.ac.uk>    2008-09-04 01:45:38 -0400
commit    83337dae6ca94d801b6700600244865cd694205b (patch)
tree      8d46dcc50324ce3842f94c40a332b9689e488eab /net/dccp/ccids/ccid2.c
parent    146993cf5174472644ed11bd5fb539f0af8bfa49 (diff)
dccp ccid-2: Stop polling
This updates CCID2 to use the CCID dequeuing mechanism, converting the previous constant polling into an event-driven mechanism.

Signed-off-by: Gerrit Renker <gerrit@erg.abdn.ac.uk>
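Note for readers: the diff below relies on the helper ccid2_cwnd_network_limited(), which is defined outside this diff and not shown here. Since it replaces the open-coded 'hctx->pipe < hctx->cwnd' test in ccid2_hc_tx_send_packet(), its definition is presumably the negation of that test; a hedged reconstruction, not part of this patch:

static inline bool ccid2_cwnd_network_limited(struct ccid2_hc_tx_sock *hctx)
{
	/* network-limited: everything the congestion window allows is in flight */
	return hctx->pipe >= hctx->cwnd;
}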
Diffstat (limited to 'net/dccp/ccids/ccid2.c')
-rw-r--r--    net/dccp/ccids/ccid2.c    21
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index bbf16b35734d..c7d83e3c1648 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -123,12 +123,9 @@ static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx)
 
 static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
 {
-	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
-
-	if (hctx->pipe < hctx->cwnd)
-		return 0;
-
-	return 1; /* XXX CCID should dequeue when ready instead of polling */
+	if (ccid2_cwnd_network_limited(ccid2_hc_tx_sk(sk)))
+		return CCID_PACKET_WILL_DEQUEUE_LATER;
+	return CCID_PACKET_SEND_AT_ONCE;
 }
 
 static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
@@ -168,6 +165,7 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
 {
 	struct sock *sk = (struct sock *)data;
 	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
+	const bool sender_was_blocked = ccid2_cwnd_network_limited(hctx);
 	long s;
 
 	bh_lock_sock(sk);
@@ -187,8 +185,6 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
 	if (s > 60)
 		hctx->rto = 60 * HZ;
 
-	ccid2_start_rto_timer(sk);
-
 	/* adjust pipe, cwnd etc */
 	hctx->ssthresh = hctx->cwnd / 2;
 	if (hctx->ssthresh < 2)
@@ -205,6 +201,11 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
 	hctx->rpdupack = -1;
 	ccid2_change_l_ack_ratio(sk, 1);
 	ccid2_hc_tx_check_sanity(hctx);
+
+	/* if we were blocked before, we may now send cwnd=1 packet */
+	if (sender_was_blocked)
+		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+	ccid2_start_rto_timer(sk);
 out:
 	bh_unlock_sock(sk);
 	sock_put(sk);
@@ -455,6 +456,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
+	const bool sender_was_blocked = ccid2_cwnd_network_limited(hctx);
 	struct dccp_ackvec_parsed *avp;
 	u64 ackno, seqno;
 	struct ccid2_seq *seqp;
@@ -640,6 +642,9 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 
 	ccid2_hc_tx_check_sanity(hctx);
 done:
+	/* check if incoming Acks allow pending packets to be sent */
+	if (sender_was_blocked && !ccid2_cwnd_network_limited(hctx))
+		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
 	dccp_ackvec_parsed_cleanup(&hctx->av_chunks);
 }
 
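To make the before/after behaviour concrete, here is a minimal, self-contained user-space sketch of the dequeuing contract this patch adopts. It is illustrative only: struct tx_sock, cwnd_network_limited(), tasklet_schedule_xmit() and the enum values are stand-ins, not kernel code; of the names below, only CCID_PACKET_SEND_AT_ONCE, CCID_PACKET_WILL_DEQUEUE_LATER and the sender_was_blocked edge test come from the patch itself.

#include <stdbool.h>
#include <stdio.h>

/* Return codes of the new tx_send_packet contract (values are assumptions) */
enum ccid_packet_dequeue {
	CCID_PACKET_SEND_AT_ONCE,	/* cwnd has room: transmit immediately  */
	CCID_PACKET_WILL_DEQUEUE_LATER,	/* blocked: CCID will wake the sender  */
};

struct tx_sock {
	unsigned int pipe;	/* packets currently in flight */
	unsigned int cwnd;	/* congestion window           */
};

static bool cwnd_network_limited(const struct tx_sock *tx)
{
	return tx->pipe >= tx->cwnd;	/* negation of the old "pipe < cwnd" poll */
}

/* Old behaviour: the caller re-polled this until it said "send".
 * New behaviour: answer once; the CCID later wakes the sender itself.
 */
static enum ccid_packet_dequeue tx_send_packet(const struct tx_sock *tx)
{
	if (cwnd_network_limited(tx))
		return CCID_PACKET_WILL_DEQUEUE_LATER;
	return CCID_PACKET_SEND_AT_ONCE;
}

static void tasklet_schedule_xmit(void)	/* stands in for dccps_xmitlet */
{
	puts("xmitlet scheduled: sender may dequeue its backlog now");
}

/* Mirrors ccid2_hc_tx_packet_recv(): remember whether we were blocked
 * before processing the Ack, and wake the sender only on that edge.
 */
static void packet_recv(struct tx_sock *tx, unsigned int newly_acked)
{
	const bool sender_was_blocked = cwnd_network_limited(tx);

	tx->pipe -= newly_acked;	/* Acks shrink the amount in flight */

	if (sender_was_blocked && !cwnd_network_limited(tx))
		tasklet_schedule_xmit();
}

int main(void)
{
	struct tx_sock tx = { .pipe = 4, .cwnd = 4 };	/* window is full */

	if (tx_send_packet(&tx) == CCID_PACKET_WILL_DEQUEUE_LATER)
		puts("window full: packet stays queued, no polling");

	packet_recv(&tx, 2);	/* Ack arrives, frees window -> wakeup */
	return 0;
}

The key design point mirrored here is the edge trigger: the wakeup fires only on the transition from blocked to unblocked, so a sender whose window stays full is never polled. The RTO path in the patch applies the same idea, waking a previously blocked sender because the timeout resets the window and frees it to send a cwnd=1 packet.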