path: root/net/dccp/input.c
author    Jeff Garzik <jgarzik@pobox.com>  2005-09-24 00:25:02 -0400
committer Jeff Garzik <jgarzik@pobox.com>  2005-09-24 00:25:02 -0400
commit    c1d9728ecc5b560465df3c0c0d3b3825c2710b40 (patch)
tree      d0abb5c923a7a3eca2d4b2c3e1964bf484870909  /net/dccp/input.c
parent    165415f700b0c77fa1f8db6198f48582639adf78 (diff)
parent    87e807b6c461bbd449496a4c3ab78ab164a4ba97 (diff)
Merge /spare/repo/linux-2.6/
Diffstat (limited to 'net/dccp/input.c')
-rw-r--r--  net/dccp/input.c  94
1 file changed, 29 insertions(+), 65 deletions(-)
diff --git a/net/dccp/input.c b/net/dccp/input.c
index c60bc3433f5e..1b6b2cb12376 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -16,6 +16,7 @@
 
 #include <net/sock.h>
 
+#include "ackvec.h"
 #include "ccid.h"
 #include "dccp.h"
 
@@ -50,7 +51,8 @@ static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
 		return;
 	}
 
-	dccp_set_state(sk, DCCP_CLOSING);
+	if (sk->sk_state != DCCP_CLOSING)
+		dccp_set_state(sk, DCCP_CLOSING);
 	dccp_send_close(sk, 0);
 }
 
@@ -59,8 +61,8 @@ static inline void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb)
 	struct dccp_sock *dp = dccp_sk(sk);
 
 	if (dp->dccps_options.dccpo_send_ack_vector)
-		dccp_ackpkts_check_rcv_ackno(dp->dccps_hc_rx_ackpkts, sk,
-					     DCCP_SKB_CB(skb)->dccpd_ack_seq);
+		dccp_ackvec_check_rcv_ackno(dp->dccps_hc_rx_ackvec, sk,
+					    DCCP_SKB_CB(skb)->dccpd_ack_seq);
 }
 
 static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
@@ -163,37 +165,11 @@ int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
 	if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
 		dccp_event_ack_recv(sk, skb);
 
-	/*
-	 * FIXME: check ECN to see if we should use
-	 * DCCP_ACKPKTS_STATE_ECN_MARKED
-	 */
-	if (dp->dccps_options.dccpo_send_ack_vector) {
-		struct dccp_ackpkts *ap = dp->dccps_hc_rx_ackpkts;
-
-		if (dccp_ackpkts_add(dp->dccps_hc_rx_ackpkts, sk,
-				     DCCP_SKB_CB(skb)->dccpd_seq,
-				     DCCP_ACKPKTS_STATE_RECEIVED)) {
-			LIMIT_NETDEBUG(KERN_WARNING "DCCP: acknowledgeable "
-				       "packets buffer full!\n");
-			ap->dccpap_ack_seqno = DCCP_MAX_SEQNO + 1;
-			inet_csk_schedule_ack(sk);
-			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
-						  TCP_DELACK_MIN,
-						  DCCP_RTO_MAX);
-			goto discard;
-		}
-
-		/*
-		 * FIXME: this activation is probably wrong, have to study more
-		 * TCP delack machinery and how it fits into DCCP draft, but
-		 * for now it kinda "works" 8)
-		 */
-		if (!inet_csk_ack_scheduled(sk)) {
-			inet_csk_schedule_ack(sk);
-			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 5 * HZ,
-						  DCCP_RTO_MAX);
-		}
-	}
+	if (dp->dccps_options.dccpo_send_ack_vector &&
+	    dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
+			    DCCP_SKB_CB(skb)->dccpd_seq,
+			    DCCP_ACKVEC_STATE_RECEIVED))
+		goto discard;
 
 	ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
 	ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
@@ -383,9 +359,9 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
 	}
 
 out_invalid_packet:
-	return 1; /* dccp_v4_do_rcv will send a reset, but...
-		     FIXME: the reset code should be
-			    DCCP_RESET_CODE_PACKET_ERROR */
+	/* dccp_v4_do_rcv will send a reset */
+	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
+	return 1;
 }
 
 static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
@@ -432,6 +408,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 			   struct dccp_hdr *dh, unsigned len)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
+	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
 	const int old_state = sk->sk_state;
 	int queued = 0;
 
@@ -472,7 +449,8 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		if (dh->dccph_type == DCCP_PKT_RESET)
 			goto discard;
 
-		/* Caller (dccp_v4_do_rcv) will send Reset(No Connection)*/
+		/* Caller (dccp_v4_do_rcv) will send Reset */
+		dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
 		return 1;
 	}
 
@@ -486,36 +464,17 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		if (dccp_parse_options(sk, skb))
 			goto discard;
 
-		if (DCCP_SKB_CB(skb)->dccpd_ack_seq !=
-		    DCCP_PKT_WITHOUT_ACK_SEQ)
+		if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
 			dccp_event_ack_recv(sk, skb);
 
 		ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
 		ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
 
-		/*
-		 * FIXME: check ECN to see if we should use
-		 * DCCP_ACKPKTS_STATE_ECN_MARKED
-		 */
-		if (dp->dccps_options.dccpo_send_ack_vector) {
-			if (dccp_ackpkts_add(dp->dccps_hc_rx_ackpkts, sk,
-					     DCCP_SKB_CB(skb)->dccpd_seq,
-					     DCCP_ACKPKTS_STATE_RECEIVED))
-				goto discard;
-			/*
-			 * FIXME: this activation is probably wrong, have to
-			 * study more TCP delack machinery and how it fits into
-			 * DCCP draft, but for now it kinda "works" 8)
-			 */
-			if ((dp->dccps_hc_rx_ackpkts->dccpap_ack_seqno ==
-			     DCCP_MAX_SEQNO + 1) &&
-			    !inet_csk_ack_scheduled(sk)) {
-				inet_csk_schedule_ack(sk);
-				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
-							  TCP_DELACK_MIN,
-							  DCCP_RTO_MAX);
-			}
-		}
+		if (dp->dccps_options.dccpo_send_ack_vector &&
+		    dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
+				    DCCP_SKB_CB(skb)->dccpd_seq,
+				    DCCP_ACKVEC_STATE_RECEIVED))
+			goto discard;
 	}
 
 	/*
@@ -550,8 +509,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		     dh->dccph_type == DCCP_PKT_REQUEST) ||
 		    (sk->sk_state == DCCP_RESPOND &&
 		     dh->dccph_type == DCCP_PKT_DATA)) {
-			dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
-				       DCCP_PKT_SYNC);
+			dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNC);
 			goto discard;
 		} else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) {
 			dccp_rcv_closereq(sk, skb);
@@ -561,8 +519,14 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		return 0;
 	}
 
+	if (unlikely(dh->dccph_type == DCCP_PKT_SYNC)) {
+		dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNCACK);
+		goto discard;
+	}
+
 	switch (sk->sk_state) {
 	case DCCP_CLOSED:
+		dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
 		return 1;
 
 	case DCCP_REQUESTING:
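
The recurring change in this diff replaces the open-coded dccp_ackpkts bookkeeping (with its FIXME'd delayed-ack timer handling) by a single dccp_ackvec_add() call whose failure simply discards the packet. The sketch below is a minimal, self-contained userspace model of that caller-side pattern only; it is NOT the kernel's dccp_ackvec implementation, and all type names, function names and sizes in it are illustrative.

/*
 * Toy model of the "record the sequence number, discard on overflow"
 * control flow that dccp_rcv_established()/dccp_rcv_state_process()
 * adopt in this merge.  Names and sizes are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_ACKVEC_LEN 4	/* deliberately tiny to exercise the overflow path */

struct toy_ackvec {
	uint64_t seqs[TOY_ACKVEC_LEN];
	unsigned int used;
};

/* Mirrors the contract suggested by the patch: 0 on success, nonzero if full. */
static int toy_ackvec_add(struct toy_ackvec *av, uint64_t seq)
{
	if (av->used == TOY_ACKVEC_LEN)
		return -1;
	av->seqs[av->used++] = seq;
	return 0;
}

/* Caller-side pattern from the patch: failure to record => discard. */
static void toy_rcv_packet(struct toy_ackvec *av, uint64_t seq)
{
	if (toy_ackvec_add(av, seq)) {
		printf("seq %llu: ack vector full, discarding\n",
		       (unsigned long long)seq);
		return;		/* the kernel code does "goto discard" here */
	}
	printf("seq %llu: recorded, passing to CCID handlers\n",
	       (unsigned long long)seq);
}

int main(void)
{
	struct toy_ackvec av;
	uint64_t seq;

	memset(&av, 0, sizeof(av));
	for (seq = 100; seq < 106; seq++)
		toy_rcv_packet(&av, seq);
	return 0;
}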