-rw-r--r--  include/linux/dccp.h |  2
-rw-r--r--  net/dccp/dccp.h      |  2
-rw-r--r--  net/dccp/output.c    | 90
-rw-r--r--  net/dccp/proto.c     | 16
4 files changed, 73 insertions(+), 37 deletions(-)
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 676333b9fad0..2d7671c92c0b 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -438,6 +438,7 @@ struct dccp_ackvec;
  * @dccps_role - Role of this sock, one of %dccp_role
  * @dccps_ndp_count - number of Non Data Packets since last data packet
  * @dccps_hc_rx_ackvec - rx half connection ack vector
+ * @dccps_xmit_timer - timer for when CCID is not ready to send
  */
 struct dccp_sock {
 	/* inet_connection_sock has to be the first member of dccp_sock */
@@ -470,6 +471,7 @@ struct dccp_sock {
 	enum dccp_role		dccps_role:2;
 	__u8			dccps_hc_rx_insert_options:1;
 	__u8			dccps_hc_tx_insert_options:1;
+	struct timer_list	dccps_xmit_timer;
 };
 
 static inline struct dccp_sock *dccp_sk(const struct sock *sk)
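
The new dccps_xmit_timer member gives each DCCP socket a private transmit timer for the case where the CCID (congestion-control module) is not yet willing to send. For orientation, the sketch below condenses how the rest of this patch wires the field up, using the open-coded timer API of this kernel generation (init_timer() plus explicit .data/.function assignment); it only summarises the hunks that follow, it adds no new code:

/* set up in dccp_connect_init() -- pre-timer_setup() style */
init_timer(&dp->dccps_xmit_timer);
dp->dccps_xmit_timer.data     = (unsigned long)sk;
dp->dccps_xmit_timer.function = dccp_write_xmit_timer;

/* armed in dccp_write_xmit() when the CCID asks for a delay of 'err' ms */
sk_reset_timer(sk, &dp->dccps_xmit_timer, msecs_to_jiffies(err) + jiffies);

/* cancelled in dccp_close() so it cannot fire on a closing socket */
sk_stop_timer(sk, &dp->dccps_xmit_timer);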
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index a5c5475724c0..0a21be437ed3 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -130,7 +130,7 @@ extern void dccp_send_delayed_ack(struct sock *sk);
 extern void dccp_send_sync(struct sock *sk, const u64 seq,
 			   const enum dccp_pkt_type pkt_type);
 
-extern int  dccp_write_xmit(struct sock *sk, struct sk_buff *skb, long *timeo);
+extern void dccp_write_xmit(struct sock *sk, int block);
 extern void dccp_write_space(struct sock *sk);
 
 extern void dccp_init_xmit_timers(struct sock *sk);
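
This prototype change is the heart of the patch: dccp_write_xmit() no longer takes the skb and a caller-supplied timeout and returns an error; it now drains sk->sk_write_queue itself, takes a single block flag, and logs transmit errors internally. Callers queue the packet first and then decide whether they may sleep. A minimal caller-side sketch, assuming the usual sk/skb locals and condensed from the proto.c and output.c hunks of this patch:

/* non-blocking path (dccp_sendmsg): queue, then try to send; if the
 * CCID is not ready, dccp_write_xmit() arms dccps_xmit_timer and returns */
skb_queue_tail(&sk->sk_write_queue, skb);
dccp_write_xmit(sk, 0);

/* blocking path (dccp_send_close): flush whatever is still queued,
 * sleeping in dccp_wait_for_ccid() if needed, before CLOSE/CLOSEREQ */
dccp_write_xmit(sk, 1);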
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 58669beee132..7102e3aed4ca 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -198,7 +198,7 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,
 	while (1) {
 		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 
-		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
+		if (sk->sk_err)
 			goto do_error;
 		if (!*timeo)
 			goto do_nonblock;
@@ -234,37 +234,72 @@ do_interrupted:
 	goto out;
 }
 
-int dccp_write_xmit(struct sock *sk, struct sk_buff *skb, long *timeo)
+static void dccp_write_xmit_timer(unsigned long data) {
+	struct sock *sk = (struct sock *)data;
+	struct dccp_sock *dp = dccp_sk(sk);
+
+	bh_lock_sock(sk);
+	if (sock_owned_by_user(sk))
+		sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies+1);
+	else
+		dccp_write_xmit(sk, 0);
+	bh_unlock_sock(sk);
+	sock_put(sk);
+}
+
+void dccp_write_xmit(struct sock *sk, int block)
 {
-	const struct dccp_sock *dp = dccp_sk(sk);
-	int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
+	struct dccp_sock *dp = dccp_sk(sk);
+	struct sk_buff *skb;
+	long timeo = 30000;	/* If a packet is taking longer than 2 secs
+				   we have other issues */
+
+	while ((skb = skb_peek(&sk->sk_write_queue))) {
+		int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
 					 skb->len);
 
-	if (err > 0)
-		err = dccp_wait_for_ccid(sk, skb, timeo);
+		if (err > 0) {
+			if (!block) {
+				sk_reset_timer(sk, &dp->dccps_xmit_timer,
+						msecs_to_jiffies(err)+jiffies);
+				break;
+			} else
+				err = dccp_wait_for_ccid(sk, skb, &timeo);
+			if (err) {
+				printk(KERN_CRIT "%s:err at dccp_wait_for_ccid"
+						 " %d\n", __FUNCTION__, err);
+				dump_stack();
+			}
+		}
 
-	if (err == 0) {
-		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
-		const int len = skb->len;
+		skb_dequeue(&sk->sk_write_queue);
+		if (err == 0) {
+			struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
+			const int len = skb->len;
 
-		if (sk->sk_state == DCCP_PARTOPEN) {
-			/* See 8.1.5.  Handshake Completion */
-			inet_csk_schedule_ack(sk);
-			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+			if (sk->sk_state == DCCP_PARTOPEN) {
+				/* See 8.1.5.  Handshake Completion */
+				inet_csk_schedule_ack(sk);
+				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
 						  inet_csk(sk)->icsk_rto,
 						  DCCP_RTO_MAX);
-			dcb->dccpd_type = DCCP_PKT_DATAACK;
-		} else if (dccp_ack_pending(sk))
-			dcb->dccpd_type = DCCP_PKT_DATAACK;
-		else
-			dcb->dccpd_type = DCCP_PKT_DATA;
-
-		err = dccp_transmit_skb(sk, skb);
-		ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
-	} else
-		kfree_skb(skb);
-
-	return err;
+				dcb->dccpd_type = DCCP_PKT_DATAACK;
+			} else if (dccp_ack_pending(sk))
+				dcb->dccpd_type = DCCP_PKT_DATAACK;
+			else
+				dcb->dccpd_type = DCCP_PKT_DATA;
+
+			err = dccp_transmit_skb(sk, skb);
+			ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
+			if (err) {
+				printk(KERN_CRIT "%s:err from "
+					"ccid_hc_tx_packet_sent %d\n",
+					__FUNCTION__, err);
+				dump_stack();
+			}
+		} else
+			kfree(skb);
+	}
 }
 
 int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
@@ -426,6 +461,9 @@ static inline void dccp_connect_init(struct sock *sk)
 	dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss));
 
 	icsk->icsk_retransmits = 0;
+	init_timer(&dp->dccps_xmit_timer);
+	dp->dccps_xmit_timer.data = (unsigned long)sk;
+	dp->dccps_xmit_timer.function = dccp_write_xmit_timer;
 }
 
 int dccp_connect(struct sock *sk)
@@ -560,8 +598,10 @@ void dccp_send_close(struct sock *sk, const int active)
 					DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;
 
 	if (active) {
+		dccp_write_xmit(sk, 1);
 		dccp_skb_entail(sk, skb);
 		dccp_transmit_skb(sk, skb_clone(skb, prio));
+		/* FIXME do we need a retransmit timer here? */
 	} else
 		dccp_transmit_skb(sk, skb);
 }
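
In the new dccp_write_xmit(), a positive return from ccid_hc_tx_send_packet() is treated as a "try again in that many milliseconds" hint. In non-blocking mode the packet stays on sk->sk_write_queue and the hint becomes a timer deadline; when dccps_xmit_timer fires, dccp_write_xmit_timer() retries, or re-arms itself one jiffy later if the socket is currently locked by a process. The helper below is hypothetical (the patch open-codes this inside the while loop above) and only isolates the deferral arithmetic, assuming err_ms is the CCID's hint:

static void dccp_defer_xmit(struct sock *sk, int err_ms)
{
	struct dccp_sock *dp = dccp_sk(sk);

	/* leave the skb queued; turn the millisecond hint into an
	 * absolute jiffies deadline and (re)arm the per-socket timer */
	sk_reset_timer(sk, &dp->dccps_xmit_timer,
		       jiffies + msecs_to_jiffies(err_ms));
}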
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 6f14bb5a28d4..962df0ea31aa 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -662,17 +662,8 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	if (rc != 0)
 		goto out_discard;
 
-	rc = dccp_write_xmit(sk, skb, &timeo);
-	/*
-	 * XXX we don't use sk_write_queue, so just discard the packet.
-	 *     Current plan however is to _use_ sk_write_queue with
-	 *     an algorith similar to tcp_sendmsg, where the main difference
-	 *     is that in DCCP we have to respect packet boundaries, so
-	 *     no coalescing of skbs.
-	 *
-	 *     This bug was _quickly_ found & fixed by just looking at an OSTRA
-	 *     generated callgraph 8) -acme
-	 */
+	skb_queue_tail(&sk->sk_write_queue, skb);
+	dccp_write_xmit(sk,0);
 out_release:
 	release_sock(sk);
 	return rc ? : len;
@@ -846,6 +837,7 @@ static int dccp_close_state(struct sock *sk)
 
 void dccp_close(struct sock *sk, long timeout)
 {
+	struct dccp_sock *dp = dccp_sk(sk);
 	struct sk_buff *skb;
 	int state;
 
@@ -862,6 +854,8 @@ void dccp_close(struct sock *sk, long timeout)
 		goto adjudge_to_death;
 	}
 
+	sk_stop_timer(sk, &dp->dccps_xmit_timer);
+
 	/*
 	 * We need to flush the recv. buffs. We do this only on the
 	 * descriptor close, not protocol-sourced closes, because the
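
On the proto.c side, dccp_sendmsg() now only appends the packet to sk->sk_write_queue and kicks the non-blocking transmit path, and dccp_close() gains a dp local so it can cancel any pending transmit timer before tearing the socket down. If I read the net/core/sock.c helpers correctly, sk_reset_timer() takes a socket reference when it arms a non-pending timer and sk_stop_timer() drops it again, which pairs with the sock_put() at the end of dccp_write_xmit_timer(). The fragment below is just the teardown side, condensed from the dccp_close() hunk above (illustrative, not a complete function):

struct dccp_sock *dp = dccp_sk(sk);

/* a deferred transmit must not fire into (or keep a reference to)
 * a socket that is being closed */
sk_stop_timer(sk, &dp->dccps_xmit_timer);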