author     David Howells <dhowells@redhat.com>                    2006-12-05 09:37:56 -0500
committer  David Howells <dhowells@warthog.cambridge.redhat.com>  2006-12-05 09:37:56 -0500
commit     4c1ac1b49122b805adfa4efc620592f68dccf5db (patch)
tree       87557f4bc2fd4fe65b7570489c2f610c45c0adcd /net/dccp
parent     c4028958b6ecad064b1a6303a6a5906d4fe48d73 (diff)
parent     d916faace3efc0bf19fe9a615a1ab8fa1a24cd93 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
drivers/infiniband/core/iwcm.c
drivers/net/chelsio/cxgb2.c
drivers/net/wireless/bcm43xx/bcm43xx_main.c
drivers/net/wireless/prism54/islpci_eth.c
drivers/usb/core/hub.h
drivers/usb/input/hid-core.c
net/core/netpoll.c
Fix up merge failures with Linus's head and fix new compilation failures.
Signed-Off-By: David Howells <dhowells@redhat.com>
Diffstat (limited to 'net/dccp')
-rw-r--r--  net/dccp/Kconfig                        5
-rw-r--r--  net/dccp/Makefile                       8
-rw-r--r--  net/dccp/ackvec.c                     116
-rw-r--r--  net/dccp/ackvec.h                      20
-rw-r--r--  net/dccp/ccid.h                        12
-rw-r--r--  net/dccp/ccids/Kconfig                 25
-rw-r--r--  net/dccp/ccids/ccid2.c                 45
-rw-r--r--  net/dccp/ccids/ccid2.h                  3
-rw-r--r--  net/dccp/ccids/ccid3.c                549
-rw-r--r--  net/dccp/ccids/ccid3.h                119
-rw-r--r--  net/dccp/ccids/lib/loss_interval.c      6
-rw-r--r--  net/dccp/ccids/lib/tfrc_equation.c      7
-rw-r--r--  net/dccp/dccp.h                       110
-rw-r--r--  net/dccp/feat.c                       131
-rw-r--r--  net/dccp/feat.h                        48
-rw-r--r--  net/dccp/input.c                       63
-rw-r--r--  net/dccp/ipv4.c                       537
-rw-r--r--  net/dccp/ipv6.c                       613
-rw-r--r--  net/dccp/minisocks.c                   48
-rw-r--r--  net/dccp/options.c                     61
-rw-r--r--  net/dccp/output.c                      84
-rw-r--r--  net/dccp/probe.c                        8
-rw-r--r--  net/dccp/proto.c                       64
-rw-r--r--  net/dccp/sysctl.c                      60
-rw-r--r--  net/dccp/timer.c                      134
25 files changed, 1476 insertions, 1400 deletions
diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig
index ef8919cca74b..b8a68dd41000 100644
--- a/net/dccp/Kconfig
+++ b/net/dccp/Kconfig
@@ -38,6 +38,9 @@ config IP_DCCP_DEBUG | |||
38 | ---help--- | 38 | ---help--- |
39 | Only use this if you're hacking DCCP. | 39 | Only use this if you're hacking DCCP. |
40 | 40 | ||
41 | When compiling DCCP as a module, this debugging output can be toggled | ||
42 | by setting the parameter dccp_debug of the `dccp' module to 0 or 1. | ||
43 | |||
41 | Just say N. | 44 | Just say N. |
42 | 45 | ||
43 | config NET_DCCPPROBE | 46 | config NET_DCCPPROBE |
@@ -49,7 +52,7 @@ config NET_DCCPPROBE | |||
49 | DCCP congestion avoidance modules. If you don't understand | 52 | DCCP congestion avoidance modules. If you don't understand |
50 | what was just said, you don't need it: say N. | 53 | what was just said, you don't need it: say N. |
51 | 54 | ||
52 | Documentation on how to use the packet generator can be found | 55 | Documentation on how to use DCCP connection probing can be found |
53 | at http://linux-net.osdl.org/index.php/DccpProbe | 56 | at http://linux-net.osdl.org/index.php/DccpProbe |
54 | 57 | ||
55 | To compile this code as a module, choose M here: the | 58 | To compile this code as a module, choose M here: the |
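The help text added above refers to the dccp_debug parameter of the dccp module. The declaration itself sits outside the hunks shown in this section; as a rough sketch under that assumption, a Kconfig-gated module parameter of this kind is normally wired up as follows, and can then be set at load time with e.g. `modprobe dccp dccp_debug=1':

    #include <linux/module.h>

    /* Illustrative sketch only -- the real dccp_debug declaration is not
     * part of the hunks shown in this section. */
    #ifdef CONFIG_IP_DCCP_DEBUG
    int dccp_debug;
    module_param(dccp_debug, int, 0444);
    MODULE_PARM_DESC(dccp_debug, "Enable debug messages");
    #endif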
diff --git a/net/dccp/Makefile b/net/dccp/Makefile
index 17ed99c46617..f4f8793aafff 100644
--- a/net/dccp/Makefile
+++ b/net/dccp/Makefile
@@ -1,13 +1,13 @@ | |||
1 | obj-$(CONFIG_IPV6) += dccp_ipv6.o | ||
2 | |||
3 | dccp_ipv6-y := ipv6.o | ||
4 | |||
5 | obj-$(CONFIG_IP_DCCP) += dccp.o dccp_ipv4.o | 1 | obj-$(CONFIG_IP_DCCP) += dccp.o dccp_ipv4.o |
6 | 2 | ||
7 | dccp-y := ccid.o feat.o input.o minisocks.o options.o output.o proto.o timer.o | 3 | dccp-y := ccid.o feat.o input.o minisocks.o options.o output.o proto.o timer.o |
8 | 4 | ||
9 | dccp_ipv4-y := ipv4.o | 5 | dccp_ipv4-y := ipv4.o |
10 | 6 | ||
7 | # build dccp_ipv6 as module whenever either IPv6 or DCCP is a module | ||
8 | obj-$(subst y,$(CONFIG_IP_DCCP),$(CONFIG_IPV6)) += dccp_ipv6.o | ||
9 | dccp_ipv6-y := ipv6.o | ||
10 | |||
11 | dccp-$(CONFIG_IP_DCCP_ACKVEC) += ackvec.o | 11 | dccp-$(CONFIG_IP_DCCP_ACKVEC) += ackvec.o |
12 | 12 | ||
13 | obj-$(CONFIG_INET_DCCP_DIAG) += dccp_diag.o | 13 | obj-$(CONFIG_INET_DCCP_DIAG) += dccp_diag.o |
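A note on the obj-$(subst y,$(CONFIG_IP_DCCP),$(CONFIG_IPV6)) goal added above: $(subst) replaces every literal `y' in the value of CONFIG_IPV6 with the value of CONFIG_IP_DCCP. The result is `y' only when both symbols are `y'; it is `m' either when IPv6 itself is `m' (the `m' passes through unchanged) or when IPv6 is `y' but DCCP is `m'; and it is empty when IPv6 is disabled, in which case dccp_ipv6.o is not built at all. That is exactly what the new comment asks for: dccp_ipv6 is built as a module whenever either IPv6 or DCCP is a module.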
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index f8208874ac7d..bdf1bb7a82c0 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -67,15 +67,16 @@ static void dccp_ackvec_insert_avr(struct dccp_ackvec *av, | |||
67 | int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb) | 67 | int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb) |
68 | { | 68 | { |
69 | struct dccp_sock *dp = dccp_sk(sk); | 69 | struct dccp_sock *dp = dccp_sk(sk); |
70 | #ifdef CONFIG_IP_DCCP_DEBUG | ||
71 | const char *debug_prefix = dp->dccps_role == DCCP_ROLE_CLIENT ? | ||
72 | "CLIENT tx: " : "server tx: "; | ||
73 | #endif | ||
74 | struct dccp_ackvec *av = dp->dccps_hc_rx_ackvec; | 70 | struct dccp_ackvec *av = dp->dccps_hc_rx_ackvec; |
75 | int len = av->dccpav_vec_len + 2; | 71 | /* Figure out how many options do we need to represent the ackvec */ |
72 | const u16 nr_opts = (av->dccpav_vec_len + | ||
73 | DCCP_MAX_ACKVEC_OPT_LEN - 1) / | ||
74 | DCCP_MAX_ACKVEC_OPT_LEN; | ||
75 | u16 len = av->dccpav_vec_len + 2 * nr_opts, i; | ||
76 | struct timeval now; | 76 | struct timeval now; |
77 | u32 elapsed_time; | 77 | u32 elapsed_time; |
78 | unsigned char *to, *from; | 78 | const unsigned char *tail, *from; |
79 | unsigned char *to; | ||
79 | struct dccp_ackvec_record *avr; | 80 | struct dccp_ackvec_record *avr; |
80 | 81 | ||
81 | if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN) | 82 | if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN) |
@@ -94,24 +95,37 @@ int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb) | |||
94 | 95 | ||
95 | DCCP_SKB_CB(skb)->dccpd_opt_len += len; | 96 | DCCP_SKB_CB(skb)->dccpd_opt_len += len; |
96 | 97 | ||
97 | to = skb_push(skb, len); | 98 | to = skb_push(skb, len); |
98 | *to++ = DCCPO_ACK_VECTOR_0; | ||
99 | *to++ = len; | ||
100 | |||
101 | len = av->dccpav_vec_len; | 99 | len = av->dccpav_vec_len; |
102 | from = av->dccpav_buf + av->dccpav_buf_head; | 100 | from = av->dccpav_buf + av->dccpav_buf_head; |
101 | tail = av->dccpav_buf + DCCP_MAX_ACKVEC_LEN; | ||
102 | |||
103 | for (i = 0; i < nr_opts; ++i) { | ||
104 | int copylen = len; | ||
103 | 105 | ||
104 | /* Check if buf_head wraps */ | 106 | if (len > DCCP_MAX_ACKVEC_OPT_LEN) |
105 | if ((int)av->dccpav_buf_head + len > DCCP_MAX_ACKVEC_LEN) { | 107 | copylen = DCCP_MAX_ACKVEC_OPT_LEN; |
106 | const u32 tailsize = DCCP_MAX_ACKVEC_LEN - av->dccpav_buf_head; | 108 | |
109 | *to++ = DCCPO_ACK_VECTOR_0; | ||
110 | *to++ = copylen + 2; | ||
111 | |||
112 | /* Check if buf_head wraps */ | ||
113 | if (from + copylen > tail) { | ||
114 | const u16 tailsize = tail - from; | ||
115 | |||
116 | memcpy(to, from, tailsize); | ||
117 | to += tailsize; | ||
118 | len -= tailsize; | ||
119 | copylen -= tailsize; | ||
120 | from = av->dccpav_buf; | ||
121 | } | ||
107 | 122 | ||
108 | memcpy(to, from, tailsize); | 123 | memcpy(to, from, copylen); |
109 | to += tailsize; | 124 | from += copylen; |
110 | len -= tailsize; | 125 | to += copylen; |
111 | from = av->dccpav_buf; | 126 | len -= copylen; |
112 | } | 127 | } |
113 | 128 | ||
114 | memcpy(to, from, len); | ||
115 | /* | 129 | /* |
116 | * From RFC 4340, A.2: | 130 | * From RFC 4340, A.2: |
117 | * | 131 | * |
@@ -129,9 +143,9 @@ int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb) | |||
129 | 143 | ||
130 | dccp_ackvec_insert_avr(av, avr); | 144 | dccp_ackvec_insert_avr(av, avr); |
131 | 145 | ||
132 | dccp_pr_debug("%sACK Vector 0, len=%d, ack_seqno=%llu, " | 146 | dccp_pr_debug("%s ACK Vector 0, len=%d, ack_seqno=%llu, " |
133 | "ack_ackno=%llu\n", | 147 | "ack_ackno=%llu\n", |
134 | debug_prefix, avr->dccpavr_sent_len, | 148 | dccp_role(sk), avr->dccpavr_sent_len, |
135 | (unsigned long long)avr->dccpavr_ack_seqno, | 149 | (unsigned long long)avr->dccpavr_ack_seqno, |
136 | (unsigned long long)avr->dccpavr_ack_ackno); | 150 | (unsigned long long)avr->dccpavr_ack_ackno); |
137 | return 0; | 151 | return 0; |
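The rewritten dccp_insert_option_ackvec() above no longer emits a single Ack Vector option; it splits a long vector across several DCCPO_ACK_VECTOR options of at most DCCP_MAX_ACKVEC_OPT_LEN (253) bytes each, while still handling the wrap-around of the circular dccpav_buf. A minimal standalone sketch of the same copy loop, assuming a plain byte buffer in place of the skb and the hypothetical helper name ackvec_to_options:

    #include <string.h>
    #include <stdint.h>

    #define MAX_OPT_LEN 253   /* DCCP_MAX_ACKVEC_OPT_LEN */
    #define OPT_ACKVEC  38    /* DCCPO_ACK_VECTOR_0, RFC 4340 */

    /* Copy `len' ack-vector bytes starting at `from' inside the circular
     * buffer buf[0..bufsz-1] into consecutive options written at `to'.
     * Returns the number of bytes written, option headers included. */
    static size_t ackvec_to_options(uint8_t *to, const uint8_t *buf,
                                    size_t bufsz, const uint8_t *from,
                                    size_t len)
    {
            const uint8_t *tail = buf + bufsz;
            uint8_t *start = to;

            while (len > 0) {
                    size_t copylen = len > MAX_OPT_LEN ? MAX_OPT_LEN : len;

                    *to++ = OPT_ACKVEC;
                    *to++ = copylen + 2;            /* data + 2 header bytes */

                    if (from + copylen > tail) {    /* buffer wraps */
                            size_t tailsize = tail - from;

                            memcpy(to, from, tailsize);
                            to += tailsize;
                            len -= tailsize;
                            copylen -= tailsize;
                            from = buf;
                    }
                    memcpy(to, from, copylen);
                    from += copylen;
                    to += copylen;
                    len -= copylen;
            }
            return to - start;
    }

As in the kernel loop above, the option length byte is written before the wrap handling, so it always reflects the full chunk size (data plus the two header bytes).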
@@ -145,7 +159,6 @@ struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority) | |||
145 | av->dccpav_buf_head = DCCP_MAX_ACKVEC_LEN - 1; | 159 | av->dccpav_buf_head = DCCP_MAX_ACKVEC_LEN - 1; |
146 | av->dccpav_buf_ackno = DCCP_MAX_SEQNO + 1; | 160 | av->dccpav_buf_ackno = DCCP_MAX_SEQNO + 1; |
147 | av->dccpav_buf_nonce = av->dccpav_buf_nonce = 0; | 161 | av->dccpav_buf_nonce = av->dccpav_buf_nonce = 0; |
148 | av->dccpav_ack_ptr = 0; | ||
149 | av->dccpav_time.tv_sec = 0; | 162 | av->dccpav_time.tv_sec = 0; |
150 | av->dccpav_time.tv_usec = 0; | 163 | av->dccpav_time.tv_usec = 0; |
151 | av->dccpav_vec_len = 0; | 164 | av->dccpav_vec_len = 0; |
@@ -174,13 +187,13 @@ void dccp_ackvec_free(struct dccp_ackvec *av) | |||
174 | } | 187 | } |
175 | 188 | ||
176 | static inline u8 dccp_ackvec_state(const struct dccp_ackvec *av, | 189 | static inline u8 dccp_ackvec_state(const struct dccp_ackvec *av, |
177 | const u8 index) | 190 | const u32 index) |
178 | { | 191 | { |
179 | return av->dccpav_buf[index] & DCCP_ACKVEC_STATE_MASK; | 192 | return av->dccpav_buf[index] & DCCP_ACKVEC_STATE_MASK; |
180 | } | 193 | } |
181 | 194 | ||
182 | static inline u8 dccp_ackvec_len(const struct dccp_ackvec *av, | 195 | static inline u8 dccp_ackvec_len(const struct dccp_ackvec *av, |
183 | const u8 index) | 196 | const u32 index) |
184 | { | 197 | { |
185 | return av->dccpav_buf[index] & DCCP_ACKVEC_LEN_MASK; | 198 | return av->dccpav_buf[index] & DCCP_ACKVEC_LEN_MASK; |
186 | } | 199 | } |
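The helpers above only widen the index type; the byte layout is unchanged: each dccpav_buf byte carries a two-bit packet state in its top bits (DCCP_ACKVEC_STATE_MASK) and a six-bit run length in its low bits (DCCP_ACKVEC_LEN_MASK). A small sketch of that packing, assuming the usual mask values of 0xC0 and 0x3F:

    /* Illustrative sketch; values assumed to match ackvec.h. */
    #define DCCP_ACKVEC_STATE_MASK         0xC0    /* top two bits  */
    #define DCCP_ACKVEC_LEN_MASK           0x3F    /* low six bits  */
    #define DCCP_ACKVEC_STATE_RECEIVED     0
    #define DCCP_ACKVEC_STATE_NOT_RECEIVED (3 << 6)

    /* Pack a (pre-shifted) state and a run length into one buffer byte;
     * a run length of n means the state covers n + 1 consecutive packets. */
    static inline unsigned char dccp_ackvec_pack(unsigned char state,
                                                 unsigned char run_length)
    {
            return (state & DCCP_ACKVEC_STATE_MASK) |
                   (run_length & DCCP_ACKVEC_LEN_MASK);
    }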
@@ -280,7 +293,7 @@ int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk, | |||
280 | * could reduce the complexity of this scan.) | 293 | * could reduce the complexity of this scan.) |
281 | */ | 294 | */ |
282 | u64 delta = dccp_delta_seqno(ackno, av->dccpav_buf_ackno); | 295 | u64 delta = dccp_delta_seqno(ackno, av->dccpav_buf_ackno); |
283 | u8 index = av->dccpav_buf_head; | 296 | u32 index = av->dccpav_buf_head; |
284 | 297 | ||
285 | while (1) { | 298 | while (1) { |
286 | const u8 len = dccp_ackvec_len(av, index); | 299 | const u8 len = dccp_ackvec_len(av, index); |
@@ -322,21 +335,18 @@ out_duplicate: | |||
322 | #ifdef CONFIG_IP_DCCP_DEBUG | 335 | #ifdef CONFIG_IP_DCCP_DEBUG |
323 | void dccp_ackvector_print(const u64 ackno, const unsigned char *vector, int len) | 336 | void dccp_ackvector_print(const u64 ackno, const unsigned char *vector, int len) |
324 | { | 337 | { |
325 | if (!dccp_debug) | 338 | dccp_pr_debug_cat("ACK vector len=%d, ackno=%llu |", len, |
326 | return; | 339 | (unsigned long long)ackno); |
327 | |||
328 | printk("ACK vector len=%d, ackno=%llu |", len, | ||
329 | (unsigned long long)ackno); | ||
330 | 340 | ||
331 | while (len--) { | 341 | while (len--) { |
332 | const u8 state = (*vector & DCCP_ACKVEC_STATE_MASK) >> 6; | 342 | const u8 state = (*vector & DCCP_ACKVEC_STATE_MASK) >> 6; |
333 | const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK; | 343 | const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK; |
334 | 344 | ||
335 | printk("%d,%d|", state, rl); | 345 | dccp_pr_debug_cat("%d,%d|", state, rl); |
336 | ++vector; | 346 | ++vector; |
337 | } | 347 | } |
338 | 348 | ||
339 | printk("\n"); | 349 | dccp_pr_debug_cat("\n"); |
340 | } | 350 | } |
341 | 351 | ||
342 | void dccp_ackvec_print(const struct dccp_ackvec *av) | 352 | void dccp_ackvec_print(const struct dccp_ackvec *av) |
@@ -380,24 +390,20 @@ void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av, struct sock *sk, | |||
380 | */ | 390 | */ |
381 | list_for_each_entry_reverse(avr, &av->dccpav_records, dccpavr_node) { | 391 | list_for_each_entry_reverse(avr, &av->dccpav_records, dccpavr_node) { |
382 | if (ackno == avr->dccpavr_ack_seqno) { | 392 | if (ackno == avr->dccpavr_ack_seqno) { |
383 | #ifdef CONFIG_IP_DCCP_DEBUG | 393 | dccp_pr_debug("%s ACK packet 0, len=%d, ack_seqno=%llu, " |
384 | struct dccp_sock *dp = dccp_sk(sk); | ||
385 | const char *debug_prefix = dp->dccps_role == DCCP_ROLE_CLIENT ? | ||
386 | "CLIENT rx ack: " : "server rx ack: "; | ||
387 | #endif | ||
388 | dccp_pr_debug("%sACK packet 0, len=%d, ack_seqno=%llu, " | ||
389 | "ack_ackno=%llu, ACKED!\n", | 394 | "ack_ackno=%llu, ACKED!\n", |
390 | debug_prefix, 1, | 395 | dccp_role(sk), 1, |
391 | (unsigned long long)avr->dccpavr_ack_seqno, | 396 | (unsigned long long)avr->dccpavr_ack_seqno, |
392 | (unsigned long long)avr->dccpavr_ack_ackno); | 397 | (unsigned long long)avr->dccpavr_ack_ackno); |
393 | dccp_ackvec_throw_record(av, avr); | 398 | dccp_ackvec_throw_record(av, avr); |
394 | break; | 399 | break; |
395 | } | 400 | } else if (avr->dccpavr_ack_seqno > ackno) |
401 | break; /* old news */ | ||
396 | } | 402 | } |
397 | } | 403 | } |
398 | 404 | ||
399 | static void dccp_ackvec_check_rcv_ackvector(struct dccp_ackvec *av, | 405 | static void dccp_ackvec_check_rcv_ackvector(struct dccp_ackvec *av, |
400 | struct sock *sk, u64 ackno, | 406 | struct sock *sk, u64 *ackno, |
401 | const unsigned char len, | 407 | const unsigned char len, |
402 | const unsigned char *vector) | 408 | const unsigned char *vector) |
403 | { | 409 | { |
@@ -420,7 +426,7 @@ static void dccp_ackvec_check_rcv_ackvector(struct dccp_ackvec *av, | |||
420 | const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK; | 426 | const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK; |
421 | u64 ackno_end_rl; | 427 | u64 ackno_end_rl; |
422 | 428 | ||
423 | dccp_set_seqno(&ackno_end_rl, ackno - rl); | 429 | dccp_set_seqno(&ackno_end_rl, *ackno - rl); |
424 | 430 | ||
425 | /* | 431 | /* |
426 | * If our AVR sequence number is greater than the ack, go | 432 | * If our AVR sequence number is greater than the ack, go |
@@ -428,25 +434,19 @@ static void dccp_ackvec_check_rcv_ackvector(struct dccp_ackvec *av, | |||
428 | */ | 434 | */ |
429 | list_for_each_entry_from(avr, &av->dccpav_records, | 435 | list_for_each_entry_from(avr, &av->dccpav_records, |
430 | dccpavr_node) { | 436 | dccpavr_node) { |
431 | if (!after48(avr->dccpavr_ack_seqno, ackno)) | 437 | if (!after48(avr->dccpavr_ack_seqno, *ackno)) |
432 | goto found; | 438 | goto found; |
433 | } | 439 | } |
434 | /* End of the dccpav_records list, not found, exit */ | 440 | /* End of the dccpav_records list, not found, exit */ |
435 | break; | 441 | break; |
436 | found: | 442 | found: |
437 | if (between48(avr->dccpavr_ack_seqno, ackno_end_rl, ackno)) { | 443 | if (between48(avr->dccpavr_ack_seqno, ackno_end_rl, *ackno)) { |
438 | const u8 state = *vector & DCCP_ACKVEC_STATE_MASK; | 444 | const u8 state = *vector & DCCP_ACKVEC_STATE_MASK; |
439 | if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED) { | 445 | if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED) { |
440 | #ifdef CONFIG_IP_DCCP_DEBUG | 446 | dccp_pr_debug("%s ACK vector 0, len=%d, " |
441 | struct dccp_sock *dp = dccp_sk(sk); | ||
442 | const char *debug_prefix = | ||
443 | dp->dccps_role == DCCP_ROLE_CLIENT ? | ||
444 | "CLIENT rx ack: " : "server rx ack: "; | ||
445 | #endif | ||
446 | dccp_pr_debug("%sACK vector 0, len=%d, " | ||
447 | "ack_seqno=%llu, ack_ackno=%llu, " | 447 | "ack_seqno=%llu, ack_ackno=%llu, " |
448 | "ACKED!\n", | 448 | "ACKED!\n", |
449 | debug_prefix, len, | 449 | dccp_role(sk), len, |
450 | (unsigned long long) | 450 | (unsigned long long) |
451 | avr->dccpavr_ack_seqno, | 451 | avr->dccpavr_ack_seqno, |
452 | (unsigned long long) | 452 | (unsigned long long) |
@@ -460,27 +460,23 @@ found: | |||
460 | */ | 460 | */ |
461 | } | 461 | } |
462 | 462 | ||
463 | dccp_set_seqno(&ackno, ackno_end_rl - 1); | 463 | dccp_set_seqno(ackno, ackno_end_rl - 1); |
464 | ++vector; | 464 | ++vector; |
465 | } | 465 | } |
466 | } | 466 | } |
467 | 467 | ||
468 | int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb, | 468 | int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb, |
469 | const u8 opt, const u8 *value, const u8 len) | 469 | u64 *ackno, const u8 opt, const u8 *value, const u8 len) |
470 | { | 470 | { |
471 | if (len > DCCP_MAX_ACKVEC_LEN) | 471 | if (len > DCCP_MAX_ACKVEC_OPT_LEN) |
472 | return -1; | 472 | return -1; |
473 | 473 | ||
474 | /* dccp_ackvector_print(DCCP_SKB_CB(skb)->dccpd_ack_seq, value, len); */ | 474 | /* dccp_ackvector_print(DCCP_SKB_CB(skb)->dccpd_ack_seq, value, len); */ |
475 | dccp_ackvec_check_rcv_ackvector(dccp_sk(sk)->dccps_hc_rx_ackvec, sk, | 475 | dccp_ackvec_check_rcv_ackvector(dccp_sk(sk)->dccps_hc_rx_ackvec, sk, |
476 | DCCP_SKB_CB(skb)->dccpd_ack_seq, | 476 | ackno, len, value); |
477 | len, value); | ||
478 | return 0; | 477 | return 0; |
479 | } | 478 | } |
480 | 479 | ||
481 | static char dccp_ackvec_slab_msg[] __initdata = | ||
482 | KERN_CRIT "DCCP: Unable to create ack vectors slab caches\n"; | ||
483 | |||
484 | int __init dccp_ackvec_init(void) | 480 | int __init dccp_ackvec_init(void) |
485 | { | 481 | { |
486 | dccp_ackvec_slab = kmem_cache_create("dccp_ackvec", | 482 | dccp_ackvec_slab = kmem_cache_create("dccp_ackvec", |
@@ -502,7 +498,7 @@ out_destroy_slab: | |||
502 | kmem_cache_destroy(dccp_ackvec_slab); | 498 | kmem_cache_destroy(dccp_ackvec_slab); |
503 | dccp_ackvec_slab = NULL; | 499 | dccp_ackvec_slab = NULL; |
504 | out_err: | 500 | out_err: |
505 | printk(dccp_ackvec_slab_msg); | 501 | DCCP_CRIT("Unable to create Ack Vector slab cache"); |
506 | return -ENOBUFS; | 502 | return -ENOBUFS; |
507 | } | 503 | } |
508 | 504 | ||
diff --git a/net/dccp/ackvec.h b/net/dccp/ackvec.h
index cf8f20ce23a9..96504a3b16e4 100644
--- a/net/dccp/ackvec.h
+++ b/net/dccp/ackvec.h
@@ -17,7 +17,9 @@ | |||
17 | #include <linux/types.h> | 17 | #include <linux/types.h> |
18 | 18 | ||
19 | /* Read about the ECN nonce to see why it is 253 */ | 19 | /* Read about the ECN nonce to see why it is 253 */ |
20 | #define DCCP_MAX_ACKVEC_LEN 253 | 20 | #define DCCP_MAX_ACKVEC_OPT_LEN 253 |
21 | /* We can spread an ack vector across multiple options */ | ||
22 | #define DCCP_MAX_ACKVEC_LEN (DCCP_MAX_ACKVEC_OPT_LEN * 2) | ||
21 | 23 | ||
22 | #define DCCP_ACKVEC_STATE_RECEIVED 0 | 24 | #define DCCP_ACKVEC_STATE_RECEIVED 0 |
23 | #define DCCP_ACKVEC_STATE_ECN_MARKED (1 << 6) | 25 | #define DCCP_ACKVEC_STATE_ECN_MARKED (1 << 6) |
@@ -41,7 +43,6 @@ | |||
41 | * Ack Vectors it has recently sent. For each packet sent carrying an | 43 | * Ack Vectors it has recently sent. For each packet sent carrying an |
42 | * Ack Vector, it remembers four variables: | 44 | * Ack Vector, it remembers four variables: |
43 | * | 45 | * |
44 | * @dccpav_ack_ptr - the value of buf_head at the time of acknowledgement. | ||
45 | * @dccpav_records - list of dccp_ackvec_record | 46 | * @dccpav_records - list of dccp_ackvec_record |
46 | * @dccpav_ack_nonce - the one-bit sum of the ECN Nonces for all State 0. | 47 | * @dccpav_ack_nonce - the one-bit sum of the ECN Nonces for all State 0. |
47 | * | 48 | * |
@@ -52,9 +53,8 @@ struct dccp_ackvec { | |||
52 | u64 dccpav_buf_ackno; | 53 | u64 dccpav_buf_ackno; |
53 | struct list_head dccpav_records; | 54 | struct list_head dccpav_records; |
54 | struct timeval dccpav_time; | 55 | struct timeval dccpav_time; |
55 | u8 dccpav_buf_head; | 56 | u16 dccpav_buf_head; |
56 | u8 dccpav_ack_ptr; | 57 | u16 dccpav_vec_len; |
57 | u8 dccpav_vec_len; | ||
58 | u8 dccpav_buf_nonce; | 58 | u8 dccpav_buf_nonce; |
59 | u8 dccpav_ack_nonce; | 59 | u8 dccpav_ack_nonce; |
60 | u8 dccpav_buf[DCCP_MAX_ACKVEC_LEN]; | 60 | u8 dccpav_buf[DCCP_MAX_ACKVEC_LEN]; |
@@ -77,9 +77,9 @@ struct dccp_ackvec_record { | |||
77 | struct list_head dccpavr_node; | 77 | struct list_head dccpavr_node; |
78 | u64 dccpavr_ack_seqno; | 78 | u64 dccpavr_ack_seqno; |
79 | u64 dccpavr_ack_ackno; | 79 | u64 dccpavr_ack_ackno; |
80 | u8 dccpavr_ack_ptr; | 80 | u16 dccpavr_ack_ptr; |
81 | u16 dccpavr_sent_len; | ||
81 | u8 dccpavr_ack_nonce; | 82 | u8 dccpavr_ack_nonce; |
82 | u8 dccpavr_sent_len; | ||
83 | }; | 83 | }; |
84 | 84 | ||
85 | struct sock; | 85 | struct sock; |
@@ -98,7 +98,8 @@ extern int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk, | |||
98 | extern void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av, | 98 | extern void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av, |
99 | struct sock *sk, const u64 ackno); | 99 | struct sock *sk, const u64 ackno); |
100 | extern int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb, | 100 | extern int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb, |
101 | const u8 opt, const u8 *value, const u8 len); | 101 | u64 *ackno, const u8 opt, |
102 | const u8 *value, const u8 len); | ||
102 | 103 | ||
103 | extern int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb); | 104 | extern int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb); |
104 | 105 | ||
@@ -137,7 +138,8 @@ static inline void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av, | |||
137 | } | 138 | } |
138 | 139 | ||
139 | static inline int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb, | 140 | static inline int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb, |
140 | const u8 opt, const u8 *value, const u8 len) | 141 | const u64 *ackno, const u8 opt, |
142 | const u8 *value, const u8 len) | ||
141 | { | 143 | { |
142 | return -1; | 144 | return -1; |
143 | } | 145 | } |
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index f7eb6c613414..c7c29514dce8 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -52,9 +52,9 @@ struct ccid_operations { | |||
52 | unsigned char len, u16 idx, | 52 | unsigned char len, u16 idx, |
53 | unsigned char* value); | 53 | unsigned char* value); |
54 | int (*ccid_hc_tx_send_packet)(struct sock *sk, | 54 | int (*ccid_hc_tx_send_packet)(struct sock *sk, |
55 | struct sk_buff *skb, int len); | 55 | struct sk_buff *skb); |
56 | void (*ccid_hc_tx_packet_sent)(struct sock *sk, int more, | 56 | void (*ccid_hc_tx_packet_sent)(struct sock *sk, |
57 | int len); | 57 | int more, unsigned int len); |
58 | void (*ccid_hc_rx_get_info)(struct sock *sk, | 58 | void (*ccid_hc_rx_get_info)(struct sock *sk, |
59 | struct tcp_info *info); | 59 | struct tcp_info *info); |
60 | void (*ccid_hc_tx_get_info)(struct sock *sk, | 60 | void (*ccid_hc_tx_get_info)(struct sock *sk, |
@@ -94,16 +94,16 @@ extern void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk); | |||
94 | extern void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk); | 94 | extern void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk); |
95 | 95 | ||
96 | static inline int ccid_hc_tx_send_packet(struct ccid *ccid, struct sock *sk, | 96 | static inline int ccid_hc_tx_send_packet(struct ccid *ccid, struct sock *sk, |
97 | struct sk_buff *skb, int len) | 97 | struct sk_buff *skb) |
98 | { | 98 | { |
99 | int rc = 0; | 99 | int rc = 0; |
100 | if (ccid->ccid_ops->ccid_hc_tx_send_packet != NULL) | 100 | if (ccid->ccid_ops->ccid_hc_tx_send_packet != NULL) |
101 | rc = ccid->ccid_ops->ccid_hc_tx_send_packet(sk, skb, len); | 101 | rc = ccid->ccid_ops->ccid_hc_tx_send_packet(sk, skb); |
102 | return rc; | 102 | return rc; |
103 | } | 103 | } |
104 | 104 | ||
105 | static inline void ccid_hc_tx_packet_sent(struct ccid *ccid, struct sock *sk, | 105 | static inline void ccid_hc_tx_packet_sent(struct ccid *ccid, struct sock *sk, |
106 | int more, int len) | 106 | int more, unsigned int len) |
107 | { | 107 | { |
108 | if (ccid->ccid_ops->ccid_hc_tx_packet_sent != NULL) | 108 | if (ccid->ccid_ops->ccid_hc_tx_packet_sent != NULL) |
109 | ccid->ccid_ops->ccid_hc_tx_packet_sent(sk, more, len); | 109 | ccid->ccid_ops->ccid_hc_tx_packet_sent(sk, more, len); |
diff --git a/net/dccp/ccids/Kconfig b/net/dccp/ccids/Kconfig
index 8533dabfb9f8..dac89166eb18 100644
--- a/net/dccp/ccids/Kconfig
+++ b/net/dccp/ccids/Kconfig
@@ -28,13 +28,20 @@ config IP_DCCP_CCID2 | |||
28 | This text was extracted from RFC 4340 (sec. 10.1), | 28 | This text was extracted from RFC 4340 (sec. 10.1), |
29 | http://www.ietf.org/rfc/rfc4340.txt | 29 | http://www.ietf.org/rfc/rfc4340.txt |
30 | 30 | ||
31 | To compile this CCID as a module, choose M here: the module will be | ||
32 | called dccp_ccid2. | ||
33 | |||
31 | If in doubt, say M. | 34 | If in doubt, say M. |
32 | 35 | ||
33 | config IP_DCCP_CCID2_DEBUG | 36 | config IP_DCCP_CCID2_DEBUG |
34 | bool "CCID2 debug" | 37 | bool "CCID2 debugging messages" |
35 | depends on IP_DCCP_CCID2 | 38 | depends on IP_DCCP_CCID2 |
36 | ---help--- | 39 | ---help--- |
37 | Enable CCID2 debug messages. | 40 | Enable CCID2-specific debugging messages. |
41 | |||
42 | When compiling CCID2 as a module, this debugging output can | ||
43 | additionally be toggled by setting the ccid2_debug module | ||
44 | parameter to 0 or 1. | ||
38 | 45 | ||
39 | If in doubt, say N. | 46 | If in doubt, say N. |
40 | 47 | ||
@@ -62,10 +69,24 @@ config IP_DCCP_CCID3 | |||
62 | This text was extracted from RFC 4340 (sec. 10.2), | 69 | This text was extracted from RFC 4340 (sec. 10.2), |
63 | http://www.ietf.org/rfc/rfc4340.txt | 70 | http://www.ietf.org/rfc/rfc4340.txt |
64 | 71 | ||
72 | To compile this CCID as a module, choose M here: the module will be | ||
73 | called dccp_ccid3. | ||
74 | |||
65 | If in doubt, say M. | 75 | If in doubt, say M. |
66 | 76 | ||
67 | config IP_DCCP_TFRC_LIB | 77 | config IP_DCCP_TFRC_LIB |
68 | depends on IP_DCCP_CCID3 | 78 | depends on IP_DCCP_CCID3 |
69 | def_tristate IP_DCCP_CCID3 | 79 | def_tristate IP_DCCP_CCID3 |
70 | 80 | ||
81 | config IP_DCCP_CCID3_DEBUG | ||
82 | bool "CCID3 debugging messages" | ||
83 | depends on IP_DCCP_CCID3 | ||
84 | ---help--- | ||
85 | Enable CCID3-specific debugging messages. | ||
86 | |||
87 | When compiling CCID3 as a module, this debugging output can | ||
88 | additionally be toggled by setting the ccid3_debug module | ||
89 | parameter to 0 or 1. | ||
90 | |||
91 | If in doubt, say N. | ||
71 | endmenu | 92 | endmenu |
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 162032baeac0..2555be8f4790 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -33,18 +33,11 @@ | |||
33 | #include "../dccp.h" | 33 | #include "../dccp.h" |
34 | #include "ccid2.h" | 34 | #include "ccid2.h" |
35 | 35 | ||
36 | static int ccid2_debug; | ||
37 | 36 | ||
38 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG | 37 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG |
39 | #define ccid2_pr_debug(format, a...) \ | 38 | static int ccid2_debug; |
40 | do { if (ccid2_debug) \ | 39 | #define ccid2_pr_debug(format, a...) DCCP_PR_DEBUG(ccid2_debug, format, ##a) |
41 | printk(KERN_DEBUG "%s: " format, __FUNCTION__, ##a); \ | ||
42 | } while (0) | ||
43 | #else | ||
44 | #define ccid2_pr_debug(format, a...) | ||
45 | #endif | ||
46 | 40 | ||
47 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG | ||
48 | static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hctx) | 41 | static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hctx) |
49 | { | 42 | { |
50 | int len = 0; | 43 | int len = 0; |
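With this change ccid2_pr_debug() goes through a common DCCP_PR_DEBUG() helper instead of an open-coded printk wrapper. DCCP_PR_DEBUG() itself is defined in net/dccp/dccp.h, which this merge also touches but which is not shown in this section; judging from the wrapper it replaces, a macro of roughly this shape is what is meant:

    /* Illustrative sketch only -- the real definition lives in
     * net/dccp/dccp.h. */
    #define DCCP_PR_DEBUG(enable, fmt, a...)                              \
            do {                                                          \
                    if (enable)                                           \
                            printk(KERN_DEBUG "%s: " fmt,                 \
                                   __FUNCTION__, ##a);                    \
            } while (0)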
@@ -86,7 +79,8 @@ static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hctx) | |||
86 | BUG_ON(len != hctx->ccid2hctx_seqbufc * CCID2_SEQBUF_LEN); | 79 | BUG_ON(len != hctx->ccid2hctx_seqbufc * CCID2_SEQBUF_LEN); |
87 | } | 80 | } |
88 | #else | 81 | #else |
89 | #define ccid2_hc_tx_check_sanity(hctx) do {} while (0) | 82 | #define ccid2_pr_debug(format, a...) |
83 | #define ccid2_hc_tx_check_sanity(hctx) | ||
90 | #endif | 84 | #endif |
91 | 85 | ||
92 | static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx, int num, | 86 | static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx, int num, |
@@ -131,8 +125,7 @@ static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx, int num, | |||
131 | return 0; | 125 | return 0; |
132 | } | 126 | } |
133 | 127 | ||
134 | static int ccid2_hc_tx_send_packet(struct sock *sk, | 128 | static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) |
135 | struct sk_buff *skb, int len) | ||
136 | { | 129 | { |
137 | struct ccid2_hc_tx_sock *hctx; | 130 | struct ccid2_hc_tx_sock *hctx; |
138 | 131 | ||
@@ -274,7 +267,7 @@ static void ccid2_start_rto_timer(struct sock *sk) | |||
274 | jiffies + hctx->ccid2hctx_rto); | 267 | jiffies + hctx->ccid2hctx_rto); |
275 | } | 268 | } |
276 | 269 | ||
277 | static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, int len) | 270 | static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len) |
278 | { | 271 | { |
279 | struct dccp_sock *dp = dccp_sk(sk); | 272 | struct dccp_sock *dp = dccp_sk(sk); |
280 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 273 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); |
@@ -426,7 +419,7 @@ static int ccid2_ackvector(struct sock *sk, struct sk_buff *skb, int offset, | |||
426 | return -1; | 419 | return -1; |
427 | 420 | ||
428 | out_invalid_option: | 421 | out_invalid_option: |
429 | BUG_ON(1); /* should never happen... options were previously parsed ! */ | 422 | DCCP_BUG("Invalid option - this should not happen (previous parsing)!"); |
430 | return -1; | 423 | return -1; |
431 | } | 424 | } |
432 | 425 | ||
@@ -619,7 +612,17 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
619 | } | 612 | } |
620 | 613 | ||
621 | ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq; | 614 | ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq; |
622 | seqp = hctx->ccid2hctx_seqh->ccid2s_prev; | 615 | if (after48(ackno, hctx->ccid2hctx_high_ack)) |
616 | hctx->ccid2hctx_high_ack = ackno; | ||
617 | |||
618 | seqp = hctx->ccid2hctx_seqt; | ||
619 | while (before48(seqp->ccid2s_seq, ackno)) { | ||
620 | seqp = seqp->ccid2s_next; | ||
621 | if (seqp == hctx->ccid2hctx_seqh) { | ||
622 | seqp = hctx->ccid2hctx_seqh->ccid2s_prev; | ||
623 | break; | ||
624 | } | ||
625 | } | ||
623 | 626 | ||
624 | /* If in slow-start, cwnd can increase at most Ack Ratio / 2 packets for | 627 | /* If in slow-start, cwnd can increase at most Ack Ratio / 2 packets for |
625 | * this single ack. I round up. | 628 | * this single ack. I round up. |
@@ -697,7 +700,14 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
697 | /* The state about what is acked should be correct now | 700 | /* The state about what is acked should be correct now |
698 | * Check for NUMDUPACK | 701 | * Check for NUMDUPACK |
699 | */ | 702 | */ |
700 | seqp = hctx->ccid2hctx_seqh->ccid2s_prev; | 703 | seqp = hctx->ccid2hctx_seqt; |
704 | while (before48(seqp->ccid2s_seq, hctx->ccid2hctx_high_ack)) { | ||
705 | seqp = seqp->ccid2s_next; | ||
706 | if (seqp == hctx->ccid2hctx_seqh) { | ||
707 | seqp = hctx->ccid2hctx_seqh->ccid2s_prev; | ||
708 | break; | ||
709 | } | ||
710 | } | ||
701 | done = 0; | 711 | done = 0; |
702 | while (1) { | 712 | while (1) { |
703 | if (seqp->ccid2s_acked) { | 713 | if (seqp->ccid2s_acked) { |
@@ -771,6 +781,7 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk) | |||
771 | hctx->ccid2hctx_lastrtt = 0; | 781 | hctx->ccid2hctx_lastrtt = 0; |
772 | hctx->ccid2hctx_rpdupack = -1; | 782 | hctx->ccid2hctx_rpdupack = -1; |
773 | hctx->ccid2hctx_last_cong = jiffies; | 783 | hctx->ccid2hctx_last_cong = jiffies; |
784 | hctx->ccid2hctx_high_ack = 0; | ||
774 | 785 | ||
775 | hctx->ccid2hctx_rtotimer.function = &ccid2_hc_tx_rto_expire; | 786 | hctx->ccid2hctx_rtotimer.function = &ccid2_hc_tx_rto_expire; |
776 | hctx->ccid2hctx_rtotimer.data = (unsigned long)sk; | 787 | hctx->ccid2hctx_rtotimer.data = (unsigned long)sk; |
@@ -823,8 +834,10 @@ static struct ccid_operations ccid2 = { | |||
823 | .ccid_hc_rx_packet_recv = ccid2_hc_rx_packet_recv, | 834 | .ccid_hc_rx_packet_recv = ccid2_hc_rx_packet_recv, |
824 | }; | 835 | }; |
825 | 836 | ||
837 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG | ||
826 | module_param(ccid2_debug, int, 0444); | 838 | module_param(ccid2_debug, int, 0444); |
827 | MODULE_PARM_DESC(ccid2_debug, "Enable debug messages"); | 839 | MODULE_PARM_DESC(ccid2_debug, "Enable debug messages"); |
840 | #endif | ||
828 | 841 | ||
829 | static __init int ccid2_module_init(void) | 842 | static __init int ccid2_module_init(void) |
830 | { | 843 | { |
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
index 5b2ef4acb300..ebd79499c85a 100644
--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h
@@ -35,7 +35,7 @@ struct ccid2_seq { | |||
35 | struct ccid2_seq *ccid2s_next; | 35 | struct ccid2_seq *ccid2s_next; |
36 | }; | 36 | }; |
37 | 37 | ||
38 | #define CCID2_SEQBUF_LEN 256 | 38 | #define CCID2_SEQBUF_LEN 1024 |
39 | #define CCID2_SEQBUF_MAX 128 | 39 | #define CCID2_SEQBUF_MAX 128 |
40 | 40 | ||
41 | /** struct ccid2_hc_tx_sock - CCID2 TX half connection | 41 | /** struct ccid2_hc_tx_sock - CCID2 TX half connection |
@@ -72,6 +72,7 @@ struct ccid2_hc_tx_sock { | |||
72 | int ccid2hctx_rpdupack; | 72 | int ccid2hctx_rpdupack; |
73 | int ccid2hctx_sendwait; | 73 | int ccid2hctx_sendwait; |
74 | unsigned long ccid2hctx_last_cong; | 74 | unsigned long ccid2hctx_last_cong; |
75 | u64 ccid2hctx_high_ack; | ||
75 | }; | 76 | }; |
76 | 77 | ||
77 | struct ccid2_hc_rx_sock { | 78 | struct ccid2_hc_rx_sock { |
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index cec23ad286de..70ebe705eb75 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -60,13 +60,11 @@ static u32 usecs_div(const u32 a, const u32 b) | |||
60 | return (b >= 2 * div) ? tmp / (b / div) : tmp; | 60 | return (b >= 2 * div) ? tmp / (b / div) : tmp; |
61 | } | 61 | } |
62 | 62 | ||
63 | static int ccid3_debug; | ||
64 | 63 | ||
65 | #ifdef CCID3_DEBUG | 64 | |
66 | #define ccid3_pr_debug(format, a...) \ | 65 | #ifdef CONFIG_IP_DCCP_CCID3_DEBUG |
67 | do { if (ccid3_debug) \ | 66 | static int ccid3_debug; |
68 | printk(KERN_DEBUG "%s: " format, __FUNCTION__, ##a); \ | 67 | #define ccid3_pr_debug(format, a...) DCCP_PR_DEBUG(ccid3_debug, format, ##a) |
69 | } while (0) | ||
70 | #else | 68 | #else |
71 | #define ccid3_pr_debug(format, a...) | 69 | #define ccid3_pr_debug(format, a...) |
72 | #endif | 70 | #endif |
@@ -75,15 +73,7 @@ static struct dccp_tx_hist *ccid3_tx_hist; | |||
75 | static struct dccp_rx_hist *ccid3_rx_hist; | 73 | static struct dccp_rx_hist *ccid3_rx_hist; |
76 | static struct dccp_li_hist *ccid3_li_hist; | 74 | static struct dccp_li_hist *ccid3_li_hist; |
77 | 75 | ||
78 | /* TFRC sender states */ | 76 | #ifdef CONFIG_IP_DCCP_CCID3_DEBUG |
79 | enum ccid3_hc_tx_states { | ||
80 | TFRC_SSTATE_NO_SENT = 1, | ||
81 | TFRC_SSTATE_NO_FBACK, | ||
82 | TFRC_SSTATE_FBACK, | ||
83 | TFRC_SSTATE_TERM, | ||
84 | }; | ||
85 | |||
86 | #ifdef CCID3_DEBUG | ||
87 | static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state) | 77 | static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state) |
88 | { | 78 | { |
89 | static char *ccid3_state_names[] = { | 79 | static char *ccid3_state_names[] = { |
@@ -110,25 +100,24 @@ static void ccid3_hc_tx_set_state(struct sock *sk, | |||
110 | hctx->ccid3hctx_state = state; | 100 | hctx->ccid3hctx_state = state; |
111 | } | 101 | } |
112 | 102 | ||
113 | /* Calculate new t_ipi (inter packet interval) by t_ipi = s / X_inst */ | 103 | /* |
114 | static inline void ccid3_calc_new_t_ipi(struct ccid3_hc_tx_sock *hctx) | 104 | * Recalculate scheduled nominal send time t_nom, inter-packet interval |
105 | * t_ipi, and delta value. Should be called after each change to X. | ||
106 | */ | ||
107 | static inline void ccid3_update_send_time(struct ccid3_hc_tx_sock *hctx) | ||
115 | { | 108 | { |
116 | /* | 109 | timeval_sub_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi); |
117 | * If no feedback spec says t_ipi is 1 second (set elsewhere and then | ||
118 | * doubles after every no feedback timer (separate function) | ||
119 | */ | ||
120 | if (hctx->ccid3hctx_state != TFRC_SSTATE_NO_FBACK) | ||
121 | hctx->ccid3hctx_t_ipi = usecs_div(hctx->ccid3hctx_s, | ||
122 | hctx->ccid3hctx_x); | ||
123 | } | ||
124 | 110 | ||
125 | /* Calculate new delta by delta = min(t_ipi / 2, t_gran / 2) */ | 111 | /* Calculate new t_ipi (inter packet interval) by t_ipi = s / X_inst */ |
126 | static inline void ccid3_calc_new_delta(struct ccid3_hc_tx_sock *hctx) | 112 | hctx->ccid3hctx_t_ipi = usecs_div(hctx->ccid3hctx_s, hctx->ccid3hctx_x); |
127 | { | 113 | |
114 | /* Update nominal send time with regard to the new t_ipi */ | ||
115 | timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi); | ||
116 | |||
117 | /* Calculate new delta by delta = min(t_ipi / 2, t_gran / 2) */ | ||
128 | hctx->ccid3hctx_delta = min_t(u32, hctx->ccid3hctx_t_ipi / 2, | 118 | hctx->ccid3hctx_delta = min_t(u32, hctx->ccid3hctx_t_ipi / 2, |
129 | TFRC_OPSYS_HALF_TIME_GRAN); | 119 | TFRC_OPSYS_HALF_TIME_GRAN); |
130 | } | 120 | } |
131 | |||
132 | /* | 121 | /* |
133 | * Update X by | 122 | * Update X by |
134 | * If (p > 0) | 123 | * If (p > 0) |
@@ -139,76 +128,85 @@ static inline void ccid3_calc_new_delta(struct ccid3_hc_tx_sock *hctx) | |||
139 | * X = max(min(2 * X, 2 * X_recv), s / R); | 128 | * X = max(min(2 * X, 2 * X_recv), s / R); |
140 | * tld = now; | 129 | * tld = now; |
141 | */ | 130 | */ |
142 | static void ccid3_hc_tx_update_x(struct sock *sk) | 131 | static void ccid3_hc_tx_update_x(struct sock *sk, struct timeval *now) |
132 | |||
143 | { | 133 | { |
144 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 134 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); |
135 | const __u32 old_x = hctx->ccid3hctx_x; | ||
145 | 136 | ||
146 | /* To avoid large error in calcX */ | 137 | /* To avoid large error in calcX */ |
147 | if (hctx->ccid3hctx_p >= TFRC_SMALLEST_P) { | 138 | if (hctx->ccid3hctx_p >= TFRC_SMALLEST_P) { |
148 | hctx->ccid3hctx_x_calc = tfrc_calc_x(hctx->ccid3hctx_s, | 139 | hctx->ccid3hctx_x_calc = tfrc_calc_x(hctx->ccid3hctx_s, |
149 | hctx->ccid3hctx_rtt, | 140 | hctx->ccid3hctx_rtt, |
150 | hctx->ccid3hctx_p); | 141 | hctx->ccid3hctx_p); |
151 | hctx->ccid3hctx_x = max_t(u32, min_t(u32, hctx->ccid3hctx_x_calc, | 142 | hctx->ccid3hctx_x = max_t(u32, min(hctx->ccid3hctx_x_calc, |
152 | 2 * hctx->ccid3hctx_x_recv), | 143 | hctx->ccid3hctx_x_recv * 2), |
153 | (hctx->ccid3hctx_s / | 144 | hctx->ccid3hctx_s / TFRC_T_MBI); |
154 | TFRC_MAX_BACK_OFF_TIME)); | 145 | |
155 | } else { | 146 | } else if (timeval_delta(now, &hctx->ccid3hctx_t_ld) >= |
156 | struct timeval now; | 147 | hctx->ccid3hctx_rtt) { |
148 | hctx->ccid3hctx_x = max(min(hctx->ccid3hctx_x_recv, | ||
149 | hctx->ccid3hctx_x ) * 2, | ||
150 | usecs_div(hctx->ccid3hctx_s, | ||
151 | hctx->ccid3hctx_rtt) ); | ||
152 | hctx->ccid3hctx_t_ld = *now; | ||
153 | } else | ||
154 | ccid3_pr_debug("Not changing X\n"); | ||
157 | 155 | ||
158 | dccp_timestamp(sk, &now); | 156 | if (hctx->ccid3hctx_x != old_x) |
159 | if (timeval_delta(&now, &hctx->ccid3hctx_t_ld) >= | 157 | ccid3_update_send_time(hctx); |
160 | hctx->ccid3hctx_rtt) { | 158 | } |
161 | hctx->ccid3hctx_x = max_t(u32, min_t(u32, hctx->ccid3hctx_x_recv, | 159 | |
162 | hctx->ccid3hctx_x) * 2, | 160 | /* |
163 | usecs_div(hctx->ccid3hctx_s, | 161 | * Track the mean packet size `s' (cf. RFC 4342, 5.3 and RFC 3448, 4.1) |
164 | hctx->ccid3hctx_rtt)); | 162 | * @len: DCCP packet payload size in bytes |
165 | hctx->ccid3hctx_t_ld = now; | 163 | */ |
166 | } | 164 | static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hctx, int len) |
167 | } | 165 | { |
166 | if (unlikely(len == 0)) | ||
167 | ccid3_pr_debug("Packet payload length is 0 - not updating\n"); | ||
168 | else | ||
169 | hctx->ccid3hctx_s = hctx->ccid3hctx_s == 0 ? len : | ||
170 | (9 * hctx->ccid3hctx_s + len) / 10; | ||
171 | /* | ||
172 | * Note: We could do a potential optimisation here - when `s' changes, | ||
173 | * recalculate sending rate and consequently t_ipi, t_delta, and | ||
174 | * t_now. This is however non-standard, and the benefits are not | ||
175 | * clear, so it is currently left out. | ||
176 | */ | ||
168 | } | 177 | } |
169 | 178 | ||
170 | static void ccid3_hc_tx_no_feedback_timer(unsigned long data) | 179 | static void ccid3_hc_tx_no_feedback_timer(unsigned long data) |
171 | { | 180 | { |
172 | struct sock *sk = (struct sock *)data; | 181 | struct sock *sk = (struct sock *)data; |
173 | unsigned long next_tmout = 0; | ||
174 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 182 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); |
183 | unsigned long t_nfb = USEC_PER_SEC / 5; | ||
175 | 184 | ||
176 | bh_lock_sock(sk); | 185 | bh_lock_sock(sk); |
177 | if (sock_owned_by_user(sk)) { | 186 | if (sock_owned_by_user(sk)) { |
178 | /* Try again later. */ | 187 | /* Try again later. */ |
179 | /* XXX: set some sensible MIB */ | 188 | /* XXX: set some sensible MIB */ |
180 | sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, | 189 | goto restart_timer; |
181 | jiffies + HZ / 5); | ||
182 | goto out; | ||
183 | } | 190 | } |
184 | 191 | ||
185 | ccid3_pr_debug("%s, sk=%p, state=%s\n", dccp_role(sk), sk, | 192 | ccid3_pr_debug("%s, sk=%p, state=%s\n", dccp_role(sk), sk, |
186 | ccid3_tx_state_name(hctx->ccid3hctx_state)); | 193 | ccid3_tx_state_name(hctx->ccid3hctx_state)); |
187 | 194 | ||
188 | switch (hctx->ccid3hctx_state) { | 195 | switch (hctx->ccid3hctx_state) { |
189 | case TFRC_SSTATE_TERM: | ||
190 | goto out; | ||
191 | case TFRC_SSTATE_NO_FBACK: | 196 | case TFRC_SSTATE_NO_FBACK: |
192 | /* Halve send rate */ | 197 | /* RFC 3448, 4.4: Halve send rate directly */ |
193 | hctx->ccid3hctx_x /= 2; | 198 | hctx->ccid3hctx_x = min_t(u32, hctx->ccid3hctx_x / 2, |
194 | if (hctx->ccid3hctx_x < (hctx->ccid3hctx_s / | 199 | hctx->ccid3hctx_s / TFRC_T_MBI); |
195 | TFRC_MAX_BACK_OFF_TIME)) | ||
196 | hctx->ccid3hctx_x = (hctx->ccid3hctx_s / | ||
197 | TFRC_MAX_BACK_OFF_TIME); | ||
198 | 200 | ||
199 | ccid3_pr_debug("%s, sk=%p, state=%s, updated tx rate to %d " | 201 | ccid3_pr_debug("%s, sk=%p, state=%s, updated tx rate to %d " |
200 | "bytes/s\n", | 202 | "bytes/s\n", |
201 | dccp_role(sk), sk, | 203 | dccp_role(sk), sk, |
202 | ccid3_tx_state_name(hctx->ccid3hctx_state), | 204 | ccid3_tx_state_name(hctx->ccid3hctx_state), |
203 | hctx->ccid3hctx_x); | 205 | hctx->ccid3hctx_x); |
204 | next_tmout = max_t(u32, 2 * usecs_div(hctx->ccid3hctx_s, | 206 | /* The value of R is still undefined and so we can not recompute |
205 | hctx->ccid3hctx_x), | 207 | * the timout value. Keep initial value as per [RFC 4342, 5]. */ |
206 | TFRC_INITIAL_TIMEOUT); | 208 | t_nfb = TFRC_INITIAL_TIMEOUT; |
207 | /* | 209 | ccid3_update_send_time(hctx); |
208 | * FIXME - not sure above calculation is correct. See section | ||
209 | * 5 of CCID3 11 should adjust tx_t_ipi and double that to | ||
210 | * achieve it really | ||
211 | */ | ||
212 | break; | 210 | break; |
213 | case TFRC_SSTATE_FBACK: | 211 | case TFRC_SSTATE_FBACK: |
214 | /* | 212 | /* |
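The new ccid3_hc_tx_update_s() in the hunk above tracks the mean packet size s as a moving average, s := (9 * s + len) / 10, seeded with the first payload length and left untouched for zero-length packets. For example, with s = 1000 bytes and a fresh 1460-byte payload the average becomes (9 * 1000 + 1460) / 10 = 1046 bytes, so s drifts towards the recent packet size by a tenth of the difference per packet.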
@@ -218,6 +216,8 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data) | |||
218 | if (!hctx->ccid3hctx_idle || | 216 | if (!hctx->ccid3hctx_idle || |
219 | (hctx->ccid3hctx_x_recv >= | 217 | (hctx->ccid3hctx_x_recv >= |
220 | 4 * usecs_div(hctx->ccid3hctx_s, hctx->ccid3hctx_rtt))) { | 218 | 4 * usecs_div(hctx->ccid3hctx_s, hctx->ccid3hctx_rtt))) { |
219 | struct timeval now; | ||
220 | |||
221 | ccid3_pr_debug("%s, sk=%p, state=%s, not idle\n", | 221 | ccid3_pr_debug("%s, sk=%p, state=%s, not idle\n", |
222 | dccp_role(sk), sk, | 222 | dccp_role(sk), sk, |
223 | ccid3_tx_state_name(hctx->ccid3hctx_state)); | 223 | ccid3_tx_state_name(hctx->ccid3hctx_state)); |
@@ -235,55 +235,60 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data) | |||
235 | if (hctx->ccid3hctx_p < TFRC_SMALLEST_P || | 235 | if (hctx->ccid3hctx_p < TFRC_SMALLEST_P || |
236 | hctx->ccid3hctx_x_calc > 2 * hctx->ccid3hctx_x_recv) | 236 | hctx->ccid3hctx_x_calc > 2 * hctx->ccid3hctx_x_recv) |
237 | hctx->ccid3hctx_x_recv = max_t(u32, hctx->ccid3hctx_x_recv / 2, | 237 | hctx->ccid3hctx_x_recv = max_t(u32, hctx->ccid3hctx_x_recv / 2, |
238 | hctx->ccid3hctx_s / (2 * TFRC_MAX_BACK_OFF_TIME)); | 238 | hctx->ccid3hctx_s / (2 * TFRC_T_MBI)); |
239 | else | 239 | else |
240 | hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc / 4; | 240 | hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc / 4; |
241 | 241 | ||
242 | /* Update sending rate */ | 242 | /* Update sending rate */ |
243 | ccid3_hc_tx_update_x(sk); | 243 | dccp_timestamp(sk, &now); |
244 | ccid3_hc_tx_update_x(sk, &now); | ||
244 | } | 245 | } |
245 | /* | 246 | /* |
246 | * Schedule no feedback timer to expire in | 247 | * Schedule no feedback timer to expire in |
247 | * max(4 * R, 2 * s / X) | 248 | * max(4 * R, 2 * s/X) = max(4 * R, 2 * t_ipi) |
248 | */ | 249 | */ |
249 | next_tmout = max_t(u32, hctx->ccid3hctx_t_rto, | 250 | t_nfb = max(4 * hctx->ccid3hctx_rtt, 2 * hctx->ccid3hctx_t_ipi); |
250 | 2 * usecs_div(hctx->ccid3hctx_s, | ||
251 | hctx->ccid3hctx_x)); | ||
252 | break; | 251 | break; |
253 | default: | 252 | case TFRC_SSTATE_NO_SENT: |
254 | printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n", | 253 | DCCP_BUG("Illegal %s state NO_SENT, sk=%p", dccp_role(sk), sk); |
255 | __FUNCTION__, dccp_role(sk), sk, hctx->ccid3hctx_state); | 254 | /* fall through */ |
256 | dump_stack(); | 255 | case TFRC_SSTATE_TERM: |
257 | goto out; | 256 | goto out; |
258 | } | 257 | } |
259 | 258 | ||
260 | sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, | ||
261 | jiffies + max_t(u32, 1, usecs_to_jiffies(next_tmout))); | ||
262 | hctx->ccid3hctx_idle = 1; | 259 | hctx->ccid3hctx_idle = 1; |
260 | |||
261 | restart_timer: | ||
262 | sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, | ||
263 | jiffies + usecs_to_jiffies(t_nfb)); | ||
263 | out: | 264 | out: |
264 | bh_unlock_sock(sk); | 265 | bh_unlock_sock(sk); |
265 | sock_put(sk); | 266 | sock_put(sk); |
266 | } | 267 | } |
267 | 268 | ||
268 | static int ccid3_hc_tx_send_packet(struct sock *sk, | 269 | /* |
269 | struct sk_buff *skb, int len) | 270 | * returns |
271 | * > 0: delay (in msecs) that should pass before actually sending | ||
272 | * = 0: can send immediately | ||
273 | * < 0: error condition; do not send packet | ||
274 | */ | ||
275 | static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) | ||
270 | { | 276 | { |
271 | struct dccp_sock *dp = dccp_sk(sk); | 277 | struct dccp_sock *dp = dccp_sk(sk); |
272 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 278 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); |
273 | struct dccp_tx_hist_entry *new_packet; | 279 | struct dccp_tx_hist_entry *new_packet; |
274 | struct timeval now; | 280 | struct timeval now; |
275 | long delay; | 281 | long delay; |
276 | int rc = -ENOTCONN; | ||
277 | 282 | ||
278 | BUG_ON(hctx == NULL || hctx->ccid3hctx_state == TFRC_SSTATE_TERM); | 283 | BUG_ON(hctx == NULL); |
279 | 284 | ||
280 | /* Check if pure ACK or Terminating*/ | ||
281 | /* | 285 | /* |
282 | * XXX: We only call this function for DATA and DATAACK, on, these | 286 | * This function is called only for Data and DataAck packets. Sending |
283 | * packets can have zero length, but why the comment about "pure ACK"? | 287 | * zero-sized Data(Ack)s is theoretically possible, but for congestion |
288 | * control this case is pathological - ignore it. | ||
284 | */ | 289 | */ |
285 | if (unlikely(len == 0)) | 290 | if (unlikely(skb->len == 0)) |
286 | goto out; | 291 | return -EBADMSG; |
287 | 292 | ||
288 | /* See if last packet allocated was not sent */ | 293 | /* See if last packet allocated was not sent */ |
289 | new_packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist); | 294 | new_packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist); |
@@ -291,12 +296,10 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, | |||
291 | new_packet = dccp_tx_hist_entry_new(ccid3_tx_hist, | 296 | new_packet = dccp_tx_hist_entry_new(ccid3_tx_hist, |
292 | SLAB_ATOMIC); | 297 | SLAB_ATOMIC); |
293 | 298 | ||
294 | rc = -ENOBUFS; | ||
295 | if (unlikely(new_packet == NULL)) { | 299 | if (unlikely(new_packet == NULL)) { |
296 | LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, not enough " | 300 | DCCP_WARN("%s, sk=%p, not enough mem to add to history," |
297 | "mem to add to history, send refused\n", | 301 | "send refused\n", dccp_role(sk), sk); |
298 | __FUNCTION__, dccp_role(sk), sk); | 302 | return -ENOBUFS; |
299 | goto out; | ||
300 | } | 303 | } |
301 | 304 | ||
302 | dccp_tx_hist_add_entry(&hctx->ccid3hctx_hist, new_packet); | 305 | dccp_tx_hist_add_entry(&hctx->ccid3hctx_hist, new_packet); |
@@ -311,123 +314,94 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, | |||
311 | hctx->ccid3hctx_last_win_count = 0; | 314 | hctx->ccid3hctx_last_win_count = 0; |
312 | hctx->ccid3hctx_t_last_win_count = now; | 315 | hctx->ccid3hctx_t_last_win_count = now; |
313 | ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK); | 316 | ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK); |
314 | hctx->ccid3hctx_t_ipi = TFRC_INITIAL_IPI; | ||
315 | 317 | ||
316 | /* Set nominal send time for initial packet */ | 318 | /* Set initial sending rate to 1 packet per second */ |
319 | ccid3_hc_tx_update_s(hctx, skb->len); | ||
320 | hctx->ccid3hctx_x = hctx->ccid3hctx_s; | ||
321 | |||
322 | /* First timeout, according to [RFC 3448, 4.2], is 1 second */ | ||
323 | hctx->ccid3hctx_t_ipi = USEC_PER_SEC; | ||
324 | /* Initial delta: minimum of 0.5 sec and t_gran/2 */ | ||
325 | hctx->ccid3hctx_delta = TFRC_OPSYS_HALF_TIME_GRAN; | ||
326 | |||
327 | /* Set t_0 for initial packet */ | ||
317 | hctx->ccid3hctx_t_nom = now; | 328 | hctx->ccid3hctx_t_nom = now; |
318 | timeval_add_usecs(&hctx->ccid3hctx_t_nom, | ||
319 | hctx->ccid3hctx_t_ipi); | ||
320 | ccid3_calc_new_delta(hctx); | ||
321 | rc = 0; | ||
322 | break; | 329 | break; |
323 | case TFRC_SSTATE_NO_FBACK: | 330 | case TFRC_SSTATE_NO_FBACK: |
324 | case TFRC_SSTATE_FBACK: | 331 | case TFRC_SSTATE_FBACK: |
325 | delay = (timeval_delta(&now, &hctx->ccid3hctx_t_nom) - | 332 | delay = timeval_delta(&hctx->ccid3hctx_t_nom, &now); |
326 | hctx->ccid3hctx_delta); | 333 | /* |
327 | delay /= -1000; | 334 | * Scheduling of packet transmissions [RFC 3448, 4.6] |
328 | /* divide by -1000 is to convert to ms and get sign right */ | 335 | * |
329 | rc = delay > 0 ? delay : 0; | 336 | * if (t_now > t_nom - delta) |
330 | break; | 337 | * // send the packet now |
331 | default: | 338 | * else |
332 | printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n", | 339 | * // send the packet in (t_nom - t_now) milliseconds. |
333 | __FUNCTION__, dccp_role(sk), sk, hctx->ccid3hctx_state); | 340 | */ |
334 | dump_stack(); | 341 | if (delay >= hctx->ccid3hctx_delta) |
335 | rc = -EINVAL; | 342 | return delay / 1000L; |
336 | break; | 343 | break; |
344 | case TFRC_SSTATE_TERM: | ||
345 | DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk); | ||
346 | return -EINVAL; | ||
337 | } | 347 | } |
338 | 348 | ||
339 | /* Can we send? if so add options and add to packet history */ | 349 | /* prepare to send now (add options etc.) */ |
340 | if (rc == 0) { | 350 | dp->dccps_hc_tx_insert_options = 1; |
341 | dp->dccps_hc_tx_insert_options = 1; | 351 | new_packet->dccphtx_ccval = DCCP_SKB_CB(skb)->dccpd_ccval = |
342 | new_packet->dccphtx_ccval = | 352 | hctx->ccid3hctx_last_win_count; |
343 | DCCP_SKB_CB(skb)->dccpd_ccval = | 353 | timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi); |
344 | hctx->ccid3hctx_last_win_count; | 354 | |
345 | timeval_add_usecs(&hctx->ccid3hctx_t_nom, | 355 | return 0; |
346 | hctx->ccid3hctx_t_ipi); | ||
347 | } | ||
348 | out: | ||
349 | return rc; | ||
350 | } | 356 | } |
351 | 357 | ||
352 | static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, int len) | 358 | static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len) |
353 | { | 359 | { |
354 | const struct dccp_sock *dp = dccp_sk(sk); | 360 | const struct dccp_sock *dp = dccp_sk(sk); |
355 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 361 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); |
356 | struct timeval now; | 362 | struct timeval now; |
363 | unsigned long quarter_rtt; | ||
364 | struct dccp_tx_hist_entry *packet; | ||
357 | 365 | ||
358 | BUG_ON(hctx == NULL || hctx->ccid3hctx_state == TFRC_SSTATE_TERM); | 366 | BUG_ON(hctx == NULL); |
359 | 367 | ||
360 | dccp_timestamp(sk, &now); | 368 | dccp_timestamp(sk, &now); |
361 | 369 | ||
362 | /* check if we have sent a data packet */ | 370 | ccid3_hc_tx_update_s(hctx, len); |
363 | if (len > 0) { | ||
364 | unsigned long quarter_rtt; | ||
365 | struct dccp_tx_hist_entry *packet; | ||
366 | 371 | ||
367 | packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist); | 372 | packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist); |
368 | if (unlikely(packet == NULL)) { | 373 | if (unlikely(packet == NULL)) { |
369 | LIMIT_NETDEBUG(KERN_WARNING "%s: packet doesn't " | 374 | DCCP_WARN("packet doesn't exist in history!\n"); |
370 | "exists in history!\n", __FUNCTION__); | ||
371 | return; | ||
372 | } | ||
373 | if (unlikely(packet->dccphtx_sent)) { | ||
374 | LIMIT_NETDEBUG(KERN_WARNING "%s: no unsent packet in " | ||
375 | "history!\n", __FUNCTION__); | ||
376 | return; | ||
377 | } | ||
378 | packet->dccphtx_tstamp = now; | ||
379 | packet->dccphtx_seqno = dp->dccps_gss; | ||
380 | /* | ||
381 | * Check if win_count have changed | ||
382 | * Algorithm in "8.1. Window Counter Value" in RFC 4342. | ||
383 | */ | ||
384 | quarter_rtt = timeval_delta(&now, &hctx->ccid3hctx_t_last_win_count); | ||
385 | if (likely(hctx->ccid3hctx_rtt > 8)) | ||
386 | quarter_rtt /= hctx->ccid3hctx_rtt / 4; | ||
387 | |||
388 | if (quarter_rtt > 0) { | ||
389 | hctx->ccid3hctx_t_last_win_count = now; | ||
390 | hctx->ccid3hctx_last_win_count = (hctx->ccid3hctx_last_win_count + | ||
391 | min_t(unsigned long, quarter_rtt, 5)) % 16; | ||
392 | ccid3_pr_debug("%s, sk=%p, window changed from " | ||
393 | "%u to %u!\n", | ||
394 | dccp_role(sk), sk, | ||
395 | packet->dccphtx_ccval, | ||
396 | hctx->ccid3hctx_last_win_count); | ||
397 | } | ||
398 | |||
399 | hctx->ccid3hctx_idle = 0; | ||
400 | packet->dccphtx_rtt = hctx->ccid3hctx_rtt; | ||
401 | packet->dccphtx_sent = 1; | ||
402 | } else | ||
403 | ccid3_pr_debug("%s, sk=%p, seqno=%llu NOT inserted!\n", | ||
404 | dccp_role(sk), sk, dp->dccps_gss); | ||
405 | |||
406 | switch (hctx->ccid3hctx_state) { | ||
407 | case TFRC_SSTATE_NO_SENT: | ||
408 | /* if first wasn't pure ack */ | ||
409 | if (len != 0) | ||
410 | printk(KERN_CRIT "%s: %s, First packet sent is noted " | ||
411 | "as a data packet\n", | ||
412 | __FUNCTION__, dccp_role(sk)); | ||
413 | return; | 375 | return; |
414 | case TFRC_SSTATE_NO_FBACK: | ||
415 | case TFRC_SSTATE_FBACK: | ||
416 | if (len > 0) { | ||
417 | timeval_sub_usecs(&hctx->ccid3hctx_t_nom, | ||
418 | hctx->ccid3hctx_t_ipi); | ||
419 | ccid3_calc_new_t_ipi(hctx); | ||
420 | ccid3_calc_new_delta(hctx); | ||
421 | timeval_add_usecs(&hctx->ccid3hctx_t_nom, | ||
422 | hctx->ccid3hctx_t_ipi); | ||
423 | } | ||
424 | break; | ||
425 | default: | ||
426 | printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n", | ||
427 | __FUNCTION__, dccp_role(sk), sk, hctx->ccid3hctx_state); | ||
428 | dump_stack(); | ||
429 | break; | ||
430 | } | 376 | } |
377 | if (unlikely(packet->dccphtx_sent)) { | ||
378 | DCCP_WARN("no unsent packet in history!\n"); | ||
379 | return; | ||
380 | } | ||
381 | packet->dccphtx_tstamp = now; | ||
382 | packet->dccphtx_seqno = dp->dccps_gss; | ||
383 | /* | ||
384 | * Check if win_count have changed | ||
385 | * Algorithm in "8.1. Window Counter Value" in RFC 4342. | ||
386 | */ | ||
387 | quarter_rtt = timeval_delta(&now, &hctx->ccid3hctx_t_last_win_count); | ||
388 | if (likely(hctx->ccid3hctx_rtt > 8)) | ||
389 | quarter_rtt /= hctx->ccid3hctx_rtt / 4; | ||
390 | |||
391 | if (quarter_rtt > 0) { | ||
392 | hctx->ccid3hctx_t_last_win_count = now; | ||
393 | hctx->ccid3hctx_last_win_count = (hctx->ccid3hctx_last_win_count + | ||
394 | min_t(unsigned long, quarter_rtt, 5)) % 16; | ||
395 | ccid3_pr_debug("%s, sk=%p, window changed from " | ||
396 | "%u to %u!\n", | ||
397 | dccp_role(sk), sk, | ||
398 | packet->dccphtx_ccval, | ||
399 | hctx->ccid3hctx_last_win_count); | ||
400 | } | ||
401 | |||
402 | hctx->ccid3hctx_idle = 0; | ||
403 | packet->dccphtx_rtt = hctx->ccid3hctx_rtt; | ||
404 | packet->dccphtx_sent = 1; | ||
431 | } | 405 | } |
432 | 406 | ||
433 | static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | 407 | static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) |
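The send-packet path above now returns the scheduling decision of RFC 3448, 4.6 directly: send immediately once the current time is within delta of the nominal send time t_nom, otherwise report the remaining wait in milliseconds. A compact sketch of that decision, assuming all times are kept in microseconds as in the surrounding code:

    /* Illustrative sketch of the RFC 3448, 4.6 send-time check; the return
     * convention mirrors ccid3_hc_tx_send_packet(): > 0 is a delay in
     * msecs, 0 means the packet may be sent now. */
    static long tfrc_send_delay(long t_nom_us, long t_now_us, long delta_us)
    {
            long delay = t_nom_us - t_now_us;   /* how far ahead of schedule */

            if (delay >= delta_us)              /* too early: wait */
                    return delay / 1000L;       /* usecs -> msecs */
            return 0;                           /* within delta: send now */
    }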
@@ -437,13 +411,13 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
437 | struct ccid3_options_received *opt_recv; | 411 | struct ccid3_options_received *opt_recv; |
438 | struct dccp_tx_hist_entry *packet; | 412 | struct dccp_tx_hist_entry *packet; |
439 | struct timeval now; | 413 | struct timeval now; |
440 | unsigned long next_tmout; | 414 | unsigned long t_nfb; |
441 | u32 t_elapsed; | 415 | u32 t_elapsed; |
442 | u32 pinv; | 416 | u32 pinv; |
443 | u32 x_recv; | 417 | u32 x_recv; |
444 | u32 r_sample; | 418 | u32 r_sample; |
445 | 419 | ||
446 | BUG_ON(hctx == NULL || hctx->ccid3hctx_state == TFRC_SSTATE_TERM); | 420 | BUG_ON(hctx == NULL); |
447 | 421 | ||
448 | /* we are only interested in ACKs */ | 422 | /* we are only interested in ACKs */ |
449 | if (!(DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK || | 423 | if (!(DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK || |
@@ -457,9 +431,6 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
457 | pinv = opt_recv->ccid3or_loss_event_rate; | 431 | pinv = opt_recv->ccid3or_loss_event_rate; |
458 | 432 | ||
459 | switch (hctx->ccid3hctx_state) { | 433 | switch (hctx->ccid3hctx_state) { |
460 | case TFRC_SSTATE_NO_SENT: | ||
461 | /* FIXME: what to do here? */ | ||
462 | return; | ||
463 | case TFRC_SSTATE_NO_FBACK: | 434 | case TFRC_SSTATE_NO_FBACK: |
464 | case TFRC_SSTATE_FBACK: | 435 | case TFRC_SSTATE_FBACK: |
465 | /* Calculate new round trip sample by | 436 | /* Calculate new round trip sample by |
@@ -468,11 +439,10 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
468 | packet = dccp_tx_hist_find_entry(&hctx->ccid3hctx_hist, | 439 | packet = dccp_tx_hist_find_entry(&hctx->ccid3hctx_hist, |
469 | DCCP_SKB_CB(skb)->dccpd_ack_seq); | 440 | DCCP_SKB_CB(skb)->dccpd_ack_seq); |
470 | if (unlikely(packet == NULL)) { | 441 | if (unlikely(packet == NULL)) { |
471 | LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, seqno " | 442 | DCCP_WARN("%s, sk=%p, seqno %llu(%s) doesn't exist " |
472 | "%llu(%s) doesn't exist in history!\n", | 443 | "in history!\n", dccp_role(sk), sk, |
473 | __FUNCTION__, dccp_role(sk), sk, | ||
474 | (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq, | 444 | (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq, |
475 | dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type)); | 445 | dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type)); |
476 | return; | 446 | return; |
477 | } | 447 | } |
478 | 448 | ||
@@ -480,9 +450,8 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
480 | dccp_timestamp(sk, &now); | 450 | dccp_timestamp(sk, &now); |
481 | r_sample = timeval_delta(&now, &packet->dccphtx_tstamp); | 451 | r_sample = timeval_delta(&now, &packet->dccphtx_tstamp); |
482 | if (unlikely(r_sample <= t_elapsed)) | 452 | if (unlikely(r_sample <= t_elapsed)) |
483 | LIMIT_NETDEBUG(KERN_WARNING "%s: r_sample=%uus, " | 453 | DCCP_WARN("r_sample=%uus,t_elapsed=%uus\n", |
484 | "t_elapsed=%uus\n", | 454 | r_sample, t_elapsed); |
485 | __FUNCTION__, r_sample, t_elapsed); | ||
486 | else | 455 | else |
487 | r_sample -= t_elapsed; | 456 | r_sample -= t_elapsed; |
488 | 457 | ||
@@ -495,20 +464,26 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
495 | * q is a constant, RFC 3448 recommends 0.9 | 464 | * q is a constant, RFC 3448 recommends 0.9 |
496 | */ | 465 | */ |
497 | if (hctx->ccid3hctx_state == TFRC_SSTATE_NO_FBACK) { | 466 | if (hctx->ccid3hctx_state == TFRC_SSTATE_NO_FBACK) { |
467 | /* Use Larger Initial Windows [RFC 4342, sec. 5] | ||
468 | * We deviate in that we use `s' instead of `MSS'. */ | ||
469 | u16 w_init = max( 4 * hctx->ccid3hctx_s, | ||
470 | max(2 * hctx->ccid3hctx_s, 4380)); | ||
471 | hctx->ccid3hctx_rtt = r_sample; | ||
472 | hctx->ccid3hctx_x = usecs_div(w_init, r_sample); | ||
473 | hctx->ccid3hctx_t_ld = now; | ||
474 | |||
475 | ccid3_update_send_time(hctx); | ||
498 | ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK); | 476 | ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK); |
499 | hctx->ccid3hctx_rtt = r_sample; | 477 | } else { |
500 | } else | ||
501 | hctx->ccid3hctx_rtt = (hctx->ccid3hctx_rtt * 9) / 10 + | 478 | hctx->ccid3hctx_rtt = (hctx->ccid3hctx_rtt * 9) / 10 + |
502 | r_sample / 10; | 479 | r_sample / 10; |
480 | ccid3_hc_tx_update_x(sk, &now); | ||
481 | } | ||
503 | 482 | ||
504 | ccid3_pr_debug("%s, sk=%p, New RTT estimate=%uus, " | 483 | ccid3_pr_debug("%s, sk=%p, New RTT estimate=%uus, " |
505 | "r_sample=%us\n", dccp_role(sk), sk, | 484 | "r_sample=%us\n", dccp_role(sk), sk, |
506 | hctx->ccid3hctx_rtt, r_sample); | 485 | hctx->ccid3hctx_rtt, r_sample); |
507 | 486 | ||
508 | /* Update timeout interval */ | ||
509 | hctx->ccid3hctx_t_rto = max_t(u32, 4 * hctx->ccid3hctx_rtt, | ||
510 | USEC_PER_SEC); | ||
511 | |||
512 | /* Update receive rate */ | 487 | /* Update receive rate */ |
513 | hctx->ccid3hctx_x_recv = x_recv;/* X_recv in bytes per sec */ | 488 | hctx->ccid3hctx_x_recv = x_recv;/* X_recv in bytes per sec */ |
514 | 489 | ||
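For orientation, a user-space sketch of the two computations in the hunk above: the larger initial window of [RFC 4342, sec. 5] applied on the first feedback packet, and the RTT smoothing with q = 0.9 on later feedback. All numbers are invented, and usecs_div() is replaced by plain integer arithmetic:

#include <stdio.h>

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned int s = 1460;			/* assumed packet size in bytes       */
	unsigned int r_sample = 100000;		/* assumed first RTT sample: 100 ms   */

	/* First feedback: w_init = max(4*s, max(2*s, 4380)), X = w_init / RTT */
	unsigned int w_init = max_u(4 * s, max_u(2 * s, 4380));	/* = 5840  */
	unsigned long long x = (unsigned long long)w_init * 1000000 / r_sample;
	printf("initial X = %llu bytes/sec\n", x);			/* 58400   */

	/* Later feedback: RTT is smoothed as 0.9 * rtt + 0.1 * r_sample */
	unsigned int rtt = r_sample, next_sample = 140000;
	rtt = (rtt * 9) / 10 + next_sample / 10;
	printf("smoothed RTT = %u us\n", rtt);				/* 104000  */
	return 0;
}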
@@ -528,49 +503,41 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
528 | /* unschedule no feedback timer */ | 503 | /* unschedule no feedback timer */ |
529 | sk_stop_timer(sk, &hctx->ccid3hctx_no_feedback_timer); | 504 | sk_stop_timer(sk, &hctx->ccid3hctx_no_feedback_timer); |
530 | 505 | ||
531 | /* Update sending rate */ | ||
532 | ccid3_hc_tx_update_x(sk); | ||
533 | |||
534 | /* Update next send time */ | ||
535 | timeval_sub_usecs(&hctx->ccid3hctx_t_nom, | ||
536 | hctx->ccid3hctx_t_ipi); | ||
537 | ccid3_calc_new_t_ipi(hctx); | ||
538 | timeval_add_usecs(&hctx->ccid3hctx_t_nom, | ||
539 | hctx->ccid3hctx_t_ipi); | ||
540 | ccid3_calc_new_delta(hctx); | ||
541 | |||
542 | /* remove all packets older than the one acked from history */ | 506 | /* remove all packets older than the one acked from history */ |
543 | dccp_tx_hist_purge_older(ccid3_tx_hist, | 507 | dccp_tx_hist_purge_older(ccid3_tx_hist, |
544 | &hctx->ccid3hctx_hist, packet); | 508 | &hctx->ccid3hctx_hist, packet); |
545 | /* | 509 | /* |
546 | * As we have calculated new ipi, delta, t_nom it is possible that | 510 | * As we have calculated new ipi, delta, t_nom it is possible that |
547 | * we now can send a packet, so wake up dccp_wait_for_ccids. | 511 | * we now can send a packet, so wake up dccp_wait_for_ccid |
548 | */ | 512 | */ |
549 | sk->sk_write_space(sk); | 513 | sk->sk_write_space(sk); |
550 | 514 | ||
515 | /* Update timeout interval. We use the alternative variant of | ||
516 | * [RFC 3448, 3.1] which sets the upper bound of t_rto to one | ||
517 | * second, as it is suggested for TCP (see RFC 2988, 2.4). */ | ||
518 | hctx->ccid3hctx_t_rto = max_t(u32, 4 * hctx->ccid3hctx_rtt, | ||
519 | USEC_PER_SEC ); | ||
551 | /* | 520 | /* |
552 | * Schedule no feedback timer to expire in | 521 | * Schedule no feedback timer to expire in |
553 | * max(4 * R, 2 * s / X) | 522 | * max(4 * R, 2 * s/X) = max(4 * R, 2 * t_ipi) |
554 | */ | 523 | */ |
555 | next_tmout = max(hctx->ccid3hctx_t_rto, | 524 | t_nfb = max(4 * hctx->ccid3hctx_rtt, 2 * hctx->ccid3hctx_t_ipi); |
556 | 2 * usecs_div(hctx->ccid3hctx_s, | ||
557 | hctx->ccid3hctx_x)); | ||
558 | 525 | ||
559 | ccid3_pr_debug("%s, sk=%p, Scheduled no feedback timer to " | 526 | ccid3_pr_debug("%s, sk=%p, Scheduled no feedback timer to " |
560 | "expire in %lu jiffies (%luus)\n", | 527 | "expire in %lu jiffies (%luus)\n", |
561 | dccp_role(sk), sk, | 528 | dccp_role(sk), sk, |
562 | usecs_to_jiffies(next_tmout), next_tmout); | 529 | usecs_to_jiffies(t_nfb), t_nfb); |
563 | 530 | ||
564 | sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, | 531 | sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, |
565 | jiffies + max_t(u32, 1, usecs_to_jiffies(next_tmout))); | 532 | jiffies + usecs_to_jiffies(t_nfb)); |
566 | 533 | ||
567 | /* set idle flag */ | 534 | /* set idle flag */ |
568 | hctx->ccid3hctx_idle = 1; | 535 | hctx->ccid3hctx_idle = 1; |
569 | break; | 536 | break; |
570 | default: | 537 | case TFRC_SSTATE_NO_SENT: |
571 | printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n", | 538 | DCCP_WARN("Illegal ACK received - no packet has been sent\n"); |
572 | __FUNCTION__, dccp_role(sk), sk, hctx->ccid3hctx_state); | 539 | /* fall through */ |
573 | dump_stack(); | 540 | case TFRC_SSTATE_TERM: /* ignore feedback when closing */ |
574 | break; | 541 | break; |
575 | } | 542 | } |
576 | } | 543 | } |
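A minimal user-space sketch of the two timer values set in the hunk above: t_RTO = max(4*R, 1 s) following the alternative of [RFC 3448, 3.1], and the no-feedback timer t_nfb = max(4*R, 2*t_ipi). The RTT and t_ipi values below are hypothetical:

#include <stdio.h>

#define USEC_PER_SEC 1000000UL

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned long rtt   = 100000;	/* assumed smoothed RTT: 100 ms           */
	unsigned long t_ipi =  10000;	/* assumed inter-packet interval: 10 ms   */

	unsigned long t_rto = max_ul(4 * rtt, USEC_PER_SEC);	/* = 1 s     */
	unsigned long t_nfb = max_ul(4 * rtt, 2 * t_ipi);	/* = 400 ms  */

	printf("t_rto = %lu us, t_nfb = %lu us\n", t_rto, t_nfb);
	return 0;
}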
@@ -610,9 +577,9 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option, | |||
610 | switch (option) { | 577 | switch (option) { |
611 | case TFRC_OPT_LOSS_EVENT_RATE: | 578 | case TFRC_OPT_LOSS_EVENT_RATE: |
612 | if (unlikely(len != 4)) { | 579 | if (unlikely(len != 4)) { |
613 | LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, invalid " | 580 | DCCP_WARN("%s, sk=%p, invalid len %d " |
614 | "len for TFRC_OPT_LOSS_EVENT_RATE\n", | 581 | "for TFRC_OPT_LOSS_EVENT_RATE\n", |
615 | __FUNCTION__, dccp_role(sk), sk); | 582 | dccp_role(sk), sk, len); |
616 | rc = -EINVAL; | 583 | rc = -EINVAL; |
617 | } else { | 584 | } else { |
618 | opt_recv->ccid3or_loss_event_rate = ntohl(*(__be32 *)value); | 585 | opt_recv->ccid3or_loss_event_rate = ntohl(*(__be32 *)value); |
@@ -631,9 +598,9 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option, | |||
631 | break; | 598 | break; |
632 | case TFRC_OPT_RECEIVE_RATE: | 599 | case TFRC_OPT_RECEIVE_RATE: |
633 | if (unlikely(len != 4)) { | 600 | if (unlikely(len != 4)) { |
634 | LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, invalid " | 601 | DCCP_WARN("%s, sk=%p, invalid len %d " |
635 | "len for TFRC_OPT_RECEIVE_RATE\n", | 602 | "for TFRC_OPT_RECEIVE_RATE\n", |
636 | __FUNCTION__, dccp_role(sk), sk); | 603 | dccp_role(sk), sk, len); |
637 | rc = -EINVAL; | 604 | rc = -EINVAL; |
638 | } else { | 605 | } else { |
639 | opt_recv->ccid3or_receive_rate = ntohl(*(__be32 *)value); | 606 | opt_recv->ccid3or_receive_rate = ntohl(*(__be32 *)value); |
@@ -649,18 +616,9 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option, | |||
649 | 616 | ||
650 | static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk) | 617 | static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk) |
651 | { | 618 | { |
652 | struct dccp_sock *dp = dccp_sk(sk); | ||
653 | struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid); | 619 | struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid); |
654 | 620 | ||
655 | if (dp->dccps_packet_size >= TFRC_MIN_PACKET_SIZE && | 621 | hctx->ccid3hctx_s = 0; |
656 | dp->dccps_packet_size <= TFRC_MAX_PACKET_SIZE) | ||
657 | hctx->ccid3hctx_s = dp->dccps_packet_size; | ||
658 | else | ||
659 | hctx->ccid3hctx_s = TFRC_STD_PACKET_SIZE; | ||
660 | |||
661 | /* Set transmission rate to 1 packet per second */ | ||
662 | hctx->ccid3hctx_x = hctx->ccid3hctx_s; | ||
663 | hctx->ccid3hctx_t_rto = USEC_PER_SEC; | ||
664 | hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT; | 622 | hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT; |
665 | INIT_LIST_HEAD(&hctx->ccid3hctx_hist); | 623 | INIT_LIST_HEAD(&hctx->ccid3hctx_hist); |
666 | 624 | ||
@@ -688,14 +646,7 @@ static void ccid3_hc_tx_exit(struct sock *sk) | |||
688 | * RX Half Connection methods | 646 | * RX Half Connection methods |
689 | */ | 647 | */ |
690 | 648 | ||
691 | /* TFRC receiver states */ | 649 | #ifdef CONFIG_IP_DCCP_CCID3_DEBUG |
692 | enum ccid3_hc_rx_states { | ||
693 | TFRC_RSTATE_NO_DATA = 1, | ||
694 | TFRC_RSTATE_DATA, | ||
695 | TFRC_RSTATE_TERM = 127, | ||
696 | }; | ||
697 | |||
698 | #ifdef CCID3_DEBUG | ||
699 | static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state) | 650 | static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state) |
700 | { | 651 | { |
701 | static char *ccid3_rx_state_names[] = { | 652 | static char *ccid3_rx_state_names[] = { |
@@ -721,6 +672,15 @@ static void ccid3_hc_rx_set_state(struct sock *sk, | |||
721 | hcrx->ccid3hcrx_state = state; | 672 | hcrx->ccid3hcrx_state = state; |
722 | } | 673 | } |
723 | 674 | ||
675 | static inline void ccid3_hc_rx_update_s(struct ccid3_hc_rx_sock *hcrx, int len) | ||
676 | { | ||
677 | if (unlikely(len == 0)) /* don't update on empty packets (e.g. ACKs) */ | ||
678 | ccid3_pr_debug("Packet payload length is 0 - not updating\n"); | ||
679 | else | ||
680 | hcrx->ccid3hcrx_s = hcrx->ccid3hcrx_s == 0 ? len : | ||
681 | (9 * hcrx->ccid3hcrx_s + len) / 10; | ||
682 | } | ||
683 | |||
724 | static void ccid3_hc_rx_send_feedback(struct sock *sk) | 684 | static void ccid3_hc_rx_send_feedback(struct sock *sk) |
725 | { | 685 | { |
726 | struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); | 686 | struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); |
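ccid3_hc_rx_update_s(), added in the hunk above, keeps a moving average of received payload sizes: s takes the first non-zero payload length verbatim and then follows (9*s + len)/10, skipping pure ACKs. The same arithmetic with made-up packet lengths:

#include <stdio.h>

int main(void)
{
	unsigned int s = 0;
	unsigned int lens[] = { 1000, 500, 1500 };	/* invented payload sizes */
	unsigned int i, len;

	for (i = 0; i < 3; i++) {
		len = lens[i];
		if (len == 0)
			continue;			/* empty packets are ignored */
		s = (s == 0) ? len : (9 * s + len) / 10;
		printf("after %u-byte packet: s = %u\n", len, s);
	}
	/* prints s = 1000, 950, 1005 */
	return 0;
}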
@@ -743,18 +703,15 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk) | |||
743 | delta); | 703 | delta); |
744 | } | 704 | } |
745 | break; | 705 | break; |
746 | default: | 706 | case TFRC_RSTATE_TERM: |
747 | printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n", | 707 | DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk); |
748 | __FUNCTION__, dccp_role(sk), sk, hcrx->ccid3hcrx_state); | ||
749 | dump_stack(); | ||
750 | return; | 708 | return; |
751 | } | 709 | } |
752 | 710 | ||
753 | packet = dccp_rx_hist_find_data_packet(&hcrx->ccid3hcrx_hist); | 711 | packet = dccp_rx_hist_find_data_packet(&hcrx->ccid3hcrx_hist); |
754 | if (unlikely(packet == NULL)) { | 712 | if (unlikely(packet == NULL)) { |
755 | LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, no data packet " | 713 | DCCP_WARN("%s, sk=%p, no data packet in history!\n", |
756 | "in history!\n", | 714 | dccp_role(sk), sk); |
757 | __FUNCTION__, dccp_role(sk), sk); | ||
758 | return; | 715 | return; |
759 | } | 716 | } |
760 | 717 | ||
@@ -842,29 +799,29 @@ static u32 ccid3_hc_rx_calc_first_li(struct sock *sk) | |||
842 | } | 799 | } |
843 | 800 | ||
844 | if (unlikely(step == 0)) { | 801 | if (unlikely(step == 0)) { |
845 | LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, packet history " | 802 | DCCP_WARN("%s, sk=%p, packet history has no data packets!\n", |
846 | "contains no data packets!\n", | 803 | dccp_role(sk), sk); |
847 | __FUNCTION__, dccp_role(sk), sk); | ||
848 | return ~0; | 804 | return ~0; |
849 | } | 805 | } |
850 | 806 | ||
851 | if (unlikely(interval == 0)) { | 807 | if (unlikely(interval == 0)) { |
852 | LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, Could not find a " | 808 | DCCP_WARN("%s, sk=%p, Could not find a win_count interval > 0." |
853 | "win_count interval > 0. Defaulting to 1\n", | 809 | "Defaulting to 1\n", dccp_role(sk), sk); |
854 | __FUNCTION__, dccp_role(sk), sk); | ||
855 | interval = 1; | 810 | interval = 1; |
856 | } | 811 | } |
857 | found: | 812 | found: |
858 | if (!tail) { | 813 | if (!tail) { |
859 | LIMIT_NETDEBUG(KERN_WARNING "%s: tail is null\n", | 814 | DCCP_CRIT("tail is null\n"); |
860 | __FUNCTION__); | ||
861 | return ~0; | 815 | return ~0; |
862 | } | 816 | } |
863 | rtt = timeval_delta(&tstamp, &tail->dccphrx_tstamp) * 4 / interval; | 817 | rtt = timeval_delta(&tstamp, &tail->dccphrx_tstamp) * 4 / interval; |
864 | ccid3_pr_debug("%s, sk=%p, approximated RTT to %uus\n", | 818 | ccid3_pr_debug("%s, sk=%p, approximated RTT to %uus\n", |
865 | dccp_role(sk), sk, rtt); | 819 | dccp_role(sk), sk, rtt); |
866 | if (rtt == 0) | 820 | |
867 | rtt = 1; | 821 | if (rtt == 0) { |
822 | DCCP_WARN("RTT==0, setting to 1\n"); | ||
823 | rtt = 1; | ||
824 | } | ||
868 | 825 | ||
869 | dccp_timestamp(sk, &tstamp); | 826 | dccp_timestamp(sk, &tstamp); |
870 | delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback); | 827 | delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback); |
@@ -878,9 +835,7 @@ found: | |||
878 | tmp2 = (u32)tmp1; | 835 | tmp2 = (u32)tmp1; |
879 | 836 | ||
880 | if (!tmp2) { | 837 | if (!tmp2) { |
881 | LIMIT_NETDEBUG(KERN_WARNING "tmp2 = 0 " | 838 | DCCP_CRIT("tmp2 = 0, x_recv = %u, rtt =%u\n", x_recv, rtt); |
882 | "%s: x_recv = %u, rtt =%u\n", | ||
883 | __FUNCTION__, x_recv, rtt); | ||
884 | return ~0; | 839 | return ~0; |
885 | } | 840 | } |
886 | 841 | ||
@@ -926,8 +881,7 @@ static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss) | |||
926 | entry = dccp_li_hist_entry_new(ccid3_li_hist, SLAB_ATOMIC); | 881 | entry = dccp_li_hist_entry_new(ccid3_li_hist, SLAB_ATOMIC); |
927 | 882 | ||
928 | if (entry == NULL) { | 883 | if (entry == NULL) { |
929 | printk(KERN_CRIT "%s: out of memory\n",__FUNCTION__); | 884 | DCCP_BUG("out of memory - can not allocate entry"); |
930 | dump_stack(); | ||
931 | return; | 885 | return; |
932 | } | 886 | } |
933 | 887 | ||
@@ -1002,13 +956,10 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
1002 | const struct dccp_options_received *opt_recv; | 956 | const struct dccp_options_received *opt_recv; |
1003 | struct dccp_rx_hist_entry *packet; | 957 | struct dccp_rx_hist_entry *packet; |
1004 | struct timeval now; | 958 | struct timeval now; |
1005 | u8 win_count; | ||
1006 | u32 p_prev, rtt_prev, r_sample, t_elapsed; | 959 | u32 p_prev, rtt_prev, r_sample, t_elapsed; |
1007 | int loss; | 960 | int loss, payload_size; |
1008 | 961 | ||
1009 | BUG_ON(hcrx == NULL || | 962 | BUG_ON(hcrx == NULL); |
1010 | !(hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA || | ||
1011 | hcrx->ccid3hcrx_state == TFRC_RSTATE_DATA)); | ||
1012 | 963 | ||
1013 | opt_recv = &dccp_sk(sk)->dccps_options_received; | 964 | opt_recv = &dccp_sk(sk)->dccps_options_received; |
1014 | 965 | ||
@@ -1026,9 +977,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
1026 | t_elapsed = opt_recv->dccpor_elapsed_time * 10; | 977 | t_elapsed = opt_recv->dccpor_elapsed_time * 10; |
1027 | 978 | ||
1028 | if (unlikely(r_sample <= t_elapsed)) | 979 | if (unlikely(r_sample <= t_elapsed)) |
1029 | LIMIT_NETDEBUG(KERN_WARNING "%s: r_sample=%uus, " | 980 | DCCP_WARN("r_sample=%uus, t_elapsed=%uus\n", |
1030 | "t_elapsed=%uus\n", | 981 | r_sample, t_elapsed); |
1031 | __FUNCTION__, r_sample, t_elapsed); | ||
1032 | else | 982 | else |
1033 | r_sample -= t_elapsed; | 983 | r_sample -= t_elapsed; |
1034 | 984 | ||
@@ -1052,19 +1002,19 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
1052 | packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp, | 1002 | packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp, |
1053 | skb, SLAB_ATOMIC); | 1003 | skb, SLAB_ATOMIC); |
1054 | if (unlikely(packet == NULL)) { | 1004 | if (unlikely(packet == NULL)) { |
1055 | LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, Not enough mem to " | 1005 | DCCP_WARN("%s, sk=%p, Not enough mem to add rx packet " |
1056 | "add rx packet to history, consider it lost!\n", | 1006 | "to history, consider it lost!\n", dccp_role(sk), sk); |
1057 | __FUNCTION__, dccp_role(sk), sk); | ||
1058 | return; | 1007 | return; |
1059 | } | 1008 | } |
1060 | 1009 | ||
1061 | win_count = packet->dccphrx_ccval; | ||
1062 | |||
1063 | loss = ccid3_hc_rx_detect_loss(sk, packet); | 1010 | loss = ccid3_hc_rx_detect_loss(sk, packet); |
1064 | 1011 | ||
1065 | if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK) | 1012 | if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK) |
1066 | return; | 1013 | return; |
1067 | 1014 | ||
1015 | payload_size = skb->len - dccp_hdr(skb)->dccph_doff * 4; | ||
1016 | ccid3_hc_rx_update_s(hcrx, payload_size); | ||
1017 | |||
1068 | switch (hcrx->ccid3hcrx_state) { | 1018 | switch (hcrx->ccid3hcrx_state) { |
1069 | case TFRC_RSTATE_NO_DATA: | 1019 | case TFRC_RSTATE_NO_DATA: |
1070 | ccid3_pr_debug("%s, sk=%p(%s), skb=%p, sending initial " | 1020 | ccid3_pr_debug("%s, sk=%p(%s), skb=%p, sending initial " |
@@ -1075,8 +1025,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
1075 | ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA); | 1025 | ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA); |
1076 | return; | 1026 | return; |
1077 | case TFRC_RSTATE_DATA: | 1027 | case TFRC_RSTATE_DATA: |
1078 | hcrx->ccid3hcrx_bytes_recv += skb->len - | 1028 | hcrx->ccid3hcrx_bytes_recv += payload_size; |
1079 | dccp_hdr(skb)->dccph_doff * 4; | ||
1080 | if (loss) | 1029 | if (loss) |
1081 | break; | 1030 | break; |
1082 | 1031 | ||
@@ -1087,10 +1036,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
1087 | ccid3_hc_rx_send_feedback(sk); | 1036 | ccid3_hc_rx_send_feedback(sk); |
1088 | } | 1037 | } |
1089 | return; | 1038 | return; |
1090 | default: | 1039 | case TFRC_RSTATE_TERM: |
1091 | printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n", | 1040 | DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk); |
1092 | __FUNCTION__, dccp_role(sk), sk, hcrx->ccid3hcrx_state); | ||
1093 | dump_stack(); | ||
1094 | return; | 1041 | return; |
1095 | } | 1042 | } |
1096 | 1043 | ||
@@ -1107,10 +1054,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
1107 | /* Scaling up by 1000000 as fixed decimal */ | 1054 | /* Scaling up by 1000000 as fixed decimal */ |
1108 | if (i_mean != 0) | 1055 | if (i_mean != 0) |
1109 | hcrx->ccid3hcrx_p = 1000000 / i_mean; | 1056 | hcrx->ccid3hcrx_p = 1000000 / i_mean; |
1110 | } else { | 1057 | } else |
1111 | printk(KERN_CRIT "%s: empty loss hist\n",__FUNCTION__); | 1058 | DCCP_BUG("empty loss history"); |
1112 | dump_stack(); | ||
1113 | } | ||
1114 | 1059 | ||
1115 | if (hcrx->ccid3hcrx_p > p_prev) { | 1060 | if (hcrx->ccid3hcrx_p > p_prev) { |
1116 | ccid3_hc_rx_send_feedback(sk); | 1061 | ccid3_hc_rx_send_feedback(sk); |
@@ -1120,22 +1065,16 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
1120 | 1065 | ||
1121 | static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk) | 1066 | static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk) |
1122 | { | 1067 | { |
1123 | struct dccp_sock *dp = dccp_sk(sk); | ||
1124 | struct ccid3_hc_rx_sock *hcrx = ccid_priv(ccid); | 1068 | struct ccid3_hc_rx_sock *hcrx = ccid_priv(ccid); |
1125 | 1069 | ||
1126 | ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk); | 1070 | ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk); |
1127 | 1071 | ||
1128 | if (dp->dccps_packet_size >= TFRC_MIN_PACKET_SIZE && | ||
1129 | dp->dccps_packet_size <= TFRC_MAX_PACKET_SIZE) | ||
1130 | hcrx->ccid3hcrx_s = dp->dccps_packet_size; | ||
1131 | else | ||
1132 | hcrx->ccid3hcrx_s = TFRC_STD_PACKET_SIZE; | ||
1133 | |||
1134 | hcrx->ccid3hcrx_state = TFRC_RSTATE_NO_DATA; | 1072 | hcrx->ccid3hcrx_state = TFRC_RSTATE_NO_DATA; |
1135 | INIT_LIST_HEAD(&hcrx->ccid3hcrx_hist); | 1073 | INIT_LIST_HEAD(&hcrx->ccid3hcrx_hist); |
1136 | INIT_LIST_HEAD(&hcrx->ccid3hcrx_li_hist); | 1074 | INIT_LIST_HEAD(&hcrx->ccid3hcrx_li_hist); |
1137 | dccp_timestamp(sk, &hcrx->ccid3hcrx_tstamp_last_ack); | 1075 | dccp_timestamp(sk, &hcrx->ccid3hcrx_tstamp_last_ack); |
1138 | hcrx->ccid3hcrx_tstamp_last_feedback = hcrx->ccid3hcrx_tstamp_last_ack; | 1076 | hcrx->ccid3hcrx_tstamp_last_feedback = hcrx->ccid3hcrx_tstamp_last_ack; |
1077 | hcrx->ccid3hcrx_s = 0; | ||
1139 | hcrx->ccid3hcrx_rtt = 5000; /* XXX 5ms for now... */ | 1078 | hcrx->ccid3hcrx_rtt = 5000; /* XXX 5ms for now... */ |
1140 | return 0; | 1079 | return 0; |
1141 | } | 1080 | } |
@@ -1261,8 +1200,10 @@ static struct ccid_operations ccid3 = { | |||
1261 | .ccid_hc_tx_getsockopt = ccid3_hc_tx_getsockopt, | 1200 | .ccid_hc_tx_getsockopt = ccid3_hc_tx_getsockopt, |
1262 | }; | 1201 | }; |
1263 | 1202 | ||
1203 | #ifdef CONFIG_IP_DCCP_CCID3_DEBUG | ||
1264 | module_param(ccid3_debug, int, 0444); | 1204 | module_param(ccid3_debug, int, 0444); |
1265 | MODULE_PARM_DESC(ccid3_debug, "Enable debug messages"); | 1205 | MODULE_PARM_DESC(ccid3_debug, "Enable debug messages"); |
1206 | #endif | ||
1266 | 1207 | ||
1267 | static __init int ccid3_module_init(void) | 1208 | static __init int ccid3_module_init(void) |
1268 | { | 1209 | { |
diff --git a/net/dccp/ccids/ccid3.h b/net/dccp/ccids/ccid3.h index 0a2cb7536d26..27cb20ae1da8 100644 --- a/net/dccp/ccids/ccid3.h +++ b/net/dccp/ccids/ccid3.h | |||
@@ -42,20 +42,14 @@ | |||
42 | #include <linux/tfrc.h> | 42 | #include <linux/tfrc.h> |
43 | #include "../ccid.h" | 43 | #include "../ccid.h" |
44 | 44 | ||
45 | #define TFRC_MIN_PACKET_SIZE 16 | 45 | /* Two seconds as per RFC 3448 4.2 */ |
46 | #define TFRC_STD_PACKET_SIZE 256 | ||
47 | #define TFRC_MAX_PACKET_SIZE 65535 | ||
48 | |||
49 | /* Two seconds as per CCID3 spec */ | ||
50 | #define TFRC_INITIAL_TIMEOUT (2 * USEC_PER_SEC) | 46 | #define TFRC_INITIAL_TIMEOUT (2 * USEC_PER_SEC) |
51 | 47 | ||
52 | #define TFRC_INITIAL_IPI (USEC_PER_SEC / 4) | ||
53 | |||
54 | /* In usecs - half the scheduling granularity as per RFC3448 4.6 */ | 48 | /* In usecs - half the scheduling granularity as per RFC3448 4.6 */ |
55 | #define TFRC_OPSYS_HALF_TIME_GRAN (USEC_PER_SEC / (2 * HZ)) | 49 | #define TFRC_OPSYS_HALF_TIME_GRAN (USEC_PER_SEC / (2 * HZ)) |
56 | 50 | ||
57 | /* In seconds */ | 51 | /* Parameter t_mbi from [RFC 3448, 4.3]: backoff interval in seconds */ |
58 | #define TFRC_MAX_BACK_OFF_TIME 64 | 52 | #define TFRC_T_MBI 64 |
59 | 53 | ||
60 | #define TFRC_SMALLEST_P 40 | 54 | #define TFRC_SMALLEST_P 40 |
61 | 55 | ||
@@ -73,26 +67,36 @@ struct ccid3_options_received { | |||
73 | u32 ccid3or_receive_rate; | 67 | u32 ccid3or_receive_rate; |
74 | }; | 68 | }; |
75 | 69 | ||
76 | /** struct ccid3_hc_tx_sock - CCID3 sender half connection sock | 70 | /* TFRC sender states */ |
71 | enum ccid3_hc_tx_states { | ||
72 | TFRC_SSTATE_NO_SENT = 1, | ||
73 | TFRC_SSTATE_NO_FBACK, | ||
74 | TFRC_SSTATE_FBACK, | ||
75 | TFRC_SSTATE_TERM, | ||
76 | }; | ||
77 | |||
78 | /** struct ccid3_hc_tx_sock - CCID3 sender half-connection socket | ||
77 | * | 79 | * |
78 | * @ccid3hctx_state - Sender state | 80 | * @ccid3hctx_x - Current sending rate |
79 | * @ccid3hctx_x - Current sending rate | 81 | * @ccid3hctx_x_recv - Receive rate |
80 | * @ccid3hctx_x_recv - Receive rate | 82 | * @ccid3hctx_x_calc - Calculated send rate (RFC 3448, 3.1) |
81 | * @ccid3hctx_x_calc - Calculated send (?) rate | 83 | * @ccid3hctx_rtt - Estimate of current round trip time in usecs |
82 | * @ccid3hctx_s - Packet size | 84 | * @ccid3hctx_p - Current loss event rate (0-1) scaled by 1000000 |
83 | * @ccid3hctx_rtt - Estimate of current round trip time in usecs | 85 | * @ccid3hctx_s - Packet size |
84 | * @@ccid3hctx_p - Current loss event rate (0-1) scaled by 1000000 | 86 | * @ccid3hctx_t_rto - Retransmission Timeout (RFC 3448, 3.1) |
85 | * @ccid3hctx_last_win_count - Last window counter sent | 87 | * @ccid3hctx_t_ipi - Interpacket (send) interval (RFC 3448, 4.6) |
86 | * @ccid3hctx_t_last_win_count - Timestamp of earliest packet | 88 | * @ccid3hctx_state - Sender state, one of %ccid3_hc_tx_states |
87 | * with last_win_count value sent | 89 | * @ccid3hctx_last_win_count - Last window counter sent |
88 | * @ccid3hctx_no_feedback_timer - Handle to no feedback timer | 90 | * @ccid3hctx_t_last_win_count - Timestamp of earliest packet |
89 | * @ccid3hctx_idle - FIXME | 91 | * with last_win_count value sent |
90 | * @ccid3hctx_t_ld - Time last doubled during slow start | 92 | * @ccid3hctx_no_feedback_timer - Handle to no feedback timer |
91 | * @ccid3hctx_t_nom - Nominal send time of next packet | 93 | * @ccid3hctx_idle - Flag indicating that sender is idling |
92 | * @ccid3hctx_t_ipi - Interpacket (send) interval | 94 | * @ccid3hctx_t_ld - Time last doubled during slow start |
93 | * @ccid3hctx_delta - Send timer delta | 95 | * @ccid3hctx_t_nom - Nominal send time of next packet |
94 | * @ccid3hctx_hist - Packet history | 96 | * @ccid3hctx_delta - Send timer delta |
95 | */ | 97 | * @ccid3hctx_hist - Packet history |
98 | * @ccid3hctx_options_received - Parsed set of retrieved options | ||
99 | */ | ||
96 | struct ccid3_hc_tx_sock { | 100 | struct ccid3_hc_tx_sock { |
97 | struct tfrc_tx_info ccid3hctx_tfrc; | 101 | struct tfrc_tx_info ccid3hctx_tfrc; |
98 | #define ccid3hctx_x ccid3hctx_tfrc.tfrctx_x | 102 | #define ccid3hctx_x ccid3hctx_tfrc.tfrctx_x |
@@ -103,7 +107,7 @@ struct ccid3_hc_tx_sock { | |||
103 | #define ccid3hctx_t_rto ccid3hctx_tfrc.tfrctx_rto | 107 | #define ccid3hctx_t_rto ccid3hctx_tfrc.tfrctx_rto |
104 | #define ccid3hctx_t_ipi ccid3hctx_tfrc.tfrctx_ipi | 108 | #define ccid3hctx_t_ipi ccid3hctx_tfrc.tfrctx_ipi |
105 | u16 ccid3hctx_s; | 109 | u16 ccid3hctx_s; |
106 | u8 ccid3hctx_state; | 110 | enum ccid3_hc_tx_states ccid3hctx_state:8; |
107 | u8 ccid3hctx_last_win_count; | 111 | u8 ccid3hctx_last_win_count; |
108 | u8 ccid3hctx_idle; | 112 | u8 ccid3hctx_idle; |
109 | struct timeval ccid3hctx_t_last_win_count; | 113 | struct timeval ccid3hctx_t_last_win_count; |
@@ -115,23 +119,48 @@ struct ccid3_hc_tx_sock { | |||
115 | struct ccid3_options_received ccid3hctx_options_received; | 119 | struct ccid3_options_received ccid3hctx_options_received; |
116 | }; | 120 | }; |
117 | 121 | ||
122 | /* TFRC receiver states */ | ||
123 | enum ccid3_hc_rx_states { | ||
124 | TFRC_RSTATE_NO_DATA = 1, | ||
125 | TFRC_RSTATE_DATA, | ||
126 | TFRC_RSTATE_TERM = 127, | ||
127 | }; | ||
128 | |||
129 | /** struct ccid3_hc_rx_sock - CCID3 receiver half-connection socket | ||
130 | * | ||
131 | * @ccid3hcrx_x_recv - Receiver estimate of send rate (RFC 3448 4.3) | ||
132 | * @ccid3hcrx_rtt - Receiver estimate of rtt (non-standard) | ||
133 | * @ccid3hcrx_p - current loss event rate (RFC 3448 5.4) | ||
134 | * @ccid3hcrx_seqno_nonloss - Last received non-loss sequence number | ||
135 | * @ccid3hcrx_ccval_nonloss - Last received non-loss Window CCVal | ||
136 | * @ccid3hcrx_ccval_last_counter - Tracks window counter (RFC 4342, 8.1) | ||
137 | * @ccid3hcrx_state - receiver state, one of %ccid3_hc_rx_states | ||
138 | * @ccid3hcrx_bytes_recv - Total sum of DCCP payload bytes | ||
139 | * @ccid3hcrx_tstamp_last_feedback - Time at which last feedback was sent | ||
140 | * @ccid3hcrx_tstamp_last_ack - Time at which last feedback was sent | ||
141 | * @ccid3hcrx_hist - Packet history | ||
142 | * @ccid3hcrx_li_hist - Loss Interval History | ||
143 | * @ccid3hcrx_s - Received packet size in bytes | ||
144 | * @ccid3hcrx_pinv - Inverse of Loss Event Rate (RFC 4342, sec. 8.5) | ||
145 | * @ccid3hcrx_elapsed_time - Time since packet reception | ||
146 | */ | ||
118 | struct ccid3_hc_rx_sock { | 147 | struct ccid3_hc_rx_sock { |
119 | struct tfrc_rx_info ccid3hcrx_tfrc; | 148 | struct tfrc_rx_info ccid3hcrx_tfrc; |
120 | #define ccid3hcrx_x_recv ccid3hcrx_tfrc.tfrcrx_x_recv | 149 | #define ccid3hcrx_x_recv ccid3hcrx_tfrc.tfrcrx_x_recv |
121 | #define ccid3hcrx_rtt ccid3hcrx_tfrc.tfrcrx_rtt | 150 | #define ccid3hcrx_rtt ccid3hcrx_tfrc.tfrcrx_rtt |
122 | #define ccid3hcrx_p ccid3hcrx_tfrc.tfrcrx_p | 151 | #define ccid3hcrx_p ccid3hcrx_tfrc.tfrcrx_p |
123 | u64 ccid3hcrx_seqno_nonloss:48, | 152 | u64 ccid3hcrx_seqno_nonloss:48, |
124 | ccid3hcrx_ccval_nonloss:4, | 153 | ccid3hcrx_ccval_nonloss:4, |
125 | ccid3hcrx_state:8, | 154 | ccid3hcrx_ccval_last_counter:4; |
126 | ccid3hcrx_ccval_last_counter:4; | 155 | enum ccid3_hc_rx_states ccid3hcrx_state:8; |
127 | u32 ccid3hcrx_bytes_recv; | 156 | u32 ccid3hcrx_bytes_recv; |
128 | struct timeval ccid3hcrx_tstamp_last_feedback; | 157 | struct timeval ccid3hcrx_tstamp_last_feedback; |
129 | struct timeval ccid3hcrx_tstamp_last_ack; | 158 | struct timeval ccid3hcrx_tstamp_last_ack; |
130 | struct list_head ccid3hcrx_hist; | 159 | struct list_head ccid3hcrx_hist; |
131 | struct list_head ccid3hcrx_li_hist; | 160 | struct list_head ccid3hcrx_li_hist; |
132 | u16 ccid3hcrx_s; | 161 | u16 ccid3hcrx_s; |
133 | u32 ccid3hcrx_pinv; | 162 | u32 ccid3hcrx_pinv; |
134 | u32 ccid3hcrx_elapsed_time; | 163 | u32 ccid3hcrx_elapsed_time; |
135 | }; | 164 | }; |
136 | 165 | ||
137 | static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk) | 166 | static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk) |
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c index 906c81ab9d4f..48b9b93f8acb 100644 --- a/net/dccp/ccids/lib/loss_interval.c +++ b/net/dccp/ccids/lib/loss_interval.c | |||
@@ -13,7 +13,7 @@ | |||
13 | 13 | ||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <net/sock.h> | 15 | #include <net/sock.h> |
16 | 16 | #include "../../dccp.h" | |
17 | #include "loss_interval.h" | 17 | #include "loss_interval.h" |
18 | 18 | ||
19 | struct dccp_li_hist *dccp_li_hist_new(const char *name) | 19 | struct dccp_li_hist *dccp_li_hist_new(const char *name) |
@@ -109,7 +109,7 @@ u32 dccp_li_hist_calc_i_mean(struct list_head *list) | |||
109 | i_tot = max(i_tot0, i_tot1); | 109 | i_tot = max(i_tot0, i_tot1); |
110 | 110 | ||
111 | if (!w_tot) { | 111 | if (!w_tot) { |
112 | LIMIT_NETDEBUG(KERN_WARNING "%s: w_tot = 0\n", __FUNCTION__); | 112 | DCCP_WARN("w_tot = 0\n"); |
113 | return 1; | 113 | return 1; |
114 | } | 114 | } |
115 | 115 | ||
@@ -128,7 +128,7 @@ int dccp_li_hist_interval_new(struct dccp_li_hist *hist, | |||
128 | entry = dccp_li_hist_entry_new(hist, SLAB_ATOMIC); | 128 | entry = dccp_li_hist_entry_new(hist, SLAB_ATOMIC); |
129 | if (entry == NULL) { | 129 | if (entry == NULL) { |
130 | dccp_li_hist_purge(hist, list); | 130 | dccp_li_hist_purge(hist, list); |
131 | dump_stack(); | 131 | DCCP_BUG("loss interval list entry is NULL"); |
132 | return 0; | 132 | return 0; |
133 | } | 133 | } |
134 | entry->dccplih_interval = ~0; | 134 | entry->dccplih_interval = ~0; |
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c index 44076e0c6591..2601012383fb 100644 --- a/net/dccp/ccids/lib/tfrc_equation.c +++ b/net/dccp/ccids/lib/tfrc_equation.c | |||
@@ -13,9 +13,8 @@ | |||
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | |||
17 | #include <asm/div64.h> | 16 | #include <asm/div64.h> |
18 | 17 | #include "../../dccp.h" | |
19 | #include "tfrc.h" | 18 | #include "tfrc.h" |
20 | 19 | ||
21 | #define TFRC_CALC_X_ARRSIZE 500 | 20 | #define TFRC_CALC_X_ARRSIZE 500 |
@@ -588,8 +587,10 @@ u32 tfrc_calc_x(u16 s, u32 R, u32 p) | |||
588 | /* p should be 0 unless there is a bug in my code */ | 587 | /* p should be 0 unless there is a bug in my code */ |
589 | index = 0; | 588 | index = 0; |
590 | 589 | ||
591 | if (R == 0) | 590 | if (R == 0) { |
591 | DCCP_WARN("RTT==0, setting to 1\n"); | ||
592 | R = 1; /* RTT can't be zero or else divide by zero */ | 592 | R = 1; /* RTT can't be zero or else divide by zero */ |
593 | } | ||
593 | 594 | ||
594 | BUG_ON(index >= TFRC_CALC_X_ARRSIZE); | 595 | BUG_ON(index >= TFRC_CALC_X_ARRSIZE); |
595 | 596 | ||
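tfrc_calc_x() approximates the TFRC throughput equation of [RFC 3448, 3.1] with a fixed-point lookup table. For orientation only, a floating-point evaluation of that equation (with b = 1 and the common simplification t_RTO = 4*R) could look as below; this is not the kernel's fixed-point algorithm, just the formula it approximates:

#include <math.h>
#include <stdio.h>

/* X = s / (R*sqrt(2*b*p/3) + t_RTO * 3*sqrt(3*b*p/8) * p * (1 + 32*p^2)) */
static double tfrc_x(double s, double R, double p)
{
	double b = 1.0, t_rto = 4.0 * R;

	return s / (R * sqrt(2.0 * b * p / 3.0) +
		    t_rto * 3.0 * sqrt(3.0 * b * p / 8.0) * p *
		    (1.0 + 32.0 * p * p));
}

int main(void)
{
	/* Hypothetical values: 1460-byte packets, 100 ms RTT, 1% loss rate */
	printf("X = %.0f bytes/sec\n", tfrc_x(1460.0, 0.1, 0.01));
	return 0;
}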
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 272e8584564e..68886986c8e4 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h | |||
@@ -18,15 +18,33 @@ | |||
18 | #include <net/tcp.h> | 18 | #include <net/tcp.h> |
19 | #include "ackvec.h" | 19 | #include "ackvec.h" |
20 | 20 | ||
21 | /* | ||
22 | * DCCP - specific warning and debugging macros. | ||
23 | */ | ||
24 | #define DCCP_WARN(fmt, a...) LIMIT_NETDEBUG(KERN_WARNING "%s: " fmt, \ | ||
25 | __FUNCTION__, ##a) | ||
26 | #define DCCP_CRIT(fmt, a...) printk(KERN_CRIT fmt " at %s:%d/%s()\n", ##a, \ | ||
27 | __FILE__, __LINE__, __FUNCTION__) | ||
28 | #define DCCP_BUG(a...) do { DCCP_CRIT("BUG: " a); dump_stack(); } while(0) | ||
29 | #define DCCP_BUG_ON(cond) do { if (unlikely((cond) != 0)) \ | ||
30 | DCCP_BUG("\"%s\" holds (exception!)", \ | ||
31 | __stringify(cond)); \ | ||
32 | } while (0) | ||
33 | |||
34 | #ifdef MODULE | ||
35 | #define DCCP_PRINTK(enable, fmt, args...) do { if (enable) \ | ||
36 | printk(fmt, ##args); \ | ||
37 | } while(0) | ||
38 | #else | ||
39 | #define DCCP_PRINTK(enable, fmt, args...) printk(fmt, ##args) | ||
40 | #endif | ||
41 | #define DCCP_PR_DEBUG(enable, fmt, a...) DCCP_PRINTK(enable, KERN_DEBUG \ | ||
42 | "%s: " fmt, __FUNCTION__, ##a) | ||
43 | |||
21 | #ifdef CONFIG_IP_DCCP_DEBUG | 44 | #ifdef CONFIG_IP_DCCP_DEBUG |
22 | extern int dccp_debug; | 45 | extern int dccp_debug; |
23 | 46 | #define dccp_pr_debug(format, a...) DCCP_PR_DEBUG(dccp_debug, format, ##a) | |
24 | #define dccp_pr_debug(format, a...) \ | 47 | #define dccp_pr_debug_cat(format, a...) DCCP_PRINTK(dccp_debug, format, ##a) |
25 | do { if (dccp_debug) \ | ||
26 | printk(KERN_DEBUG "%s: " format, __FUNCTION__ , ##a); \ | ||
27 | } while (0) | ||
28 | #define dccp_pr_debug_cat(format, a...) do { if (dccp_debug) \ | ||
29 | printk(format, ##a); } while (0) | ||
30 | #else | 48 | #else |
31 | #define dccp_pr_debug(format, a...) | 49 | #define dccp_pr_debug(format, a...) |
32 | #define dccp_pr_debug_cat(format, a...) | 50 | #define dccp_pr_debug_cat(format, a...) |
@@ -35,17 +53,21 @@ extern int dccp_debug; | |||
35 | extern struct inet_hashinfo dccp_hashinfo; | 53 | extern struct inet_hashinfo dccp_hashinfo; |
36 | 54 | ||
37 | extern atomic_t dccp_orphan_count; | 55 | extern atomic_t dccp_orphan_count; |
38 | extern int dccp_tw_count; | ||
39 | extern void dccp_tw_deschedule(struct inet_timewait_sock *tw); | ||
40 | 56 | ||
41 | extern void dccp_time_wait(struct sock *sk, int state, int timeo); | 57 | extern void dccp_time_wait(struct sock *sk, int state, int timeo); |
42 | 58 | ||
43 | /* FIXME: Right size this */ | 59 | /* |
44 | #define DCCP_MAX_OPT_LEN 128 | 60 | * Set safe upper bounds for header and option length. Since Data Offset is 8 |
45 | 61 | * bits (RFC 4340, sec. 5.1), the total header length can never be more than | |
46 | #define DCCP_MAX_PACKET_HDR 32 | 62 | * 4 * 255 = 1020 bytes. The largest possible header length is 28 bytes (X=1): |
47 | 63 | * - DCCP-Response with ACK Subheader and 4 bytes of Service code OR | |
48 | #define MAX_DCCP_HEADER (DCCP_MAX_PACKET_HDR + DCCP_MAX_OPT_LEN + MAX_HEADER) | 64 | * - DCCP-Reset with ACK Subheader and 4 bytes of Reset Code fields |
65 | * Hence a safe upper bound for the maximum option length is 1020-28 = 992 | ||
66 | */ | ||
67 | #define MAX_DCCP_SPECIFIC_HEADER (255 * sizeof(int)) | ||
68 | #define DCCP_MAX_PACKET_HDR 28 | ||
69 | #define DCCP_MAX_OPT_LEN (MAX_DCCP_SPECIFIC_HEADER - DCCP_MAX_PACKET_HDR) | ||
70 | #define MAX_DCCP_HEADER (MAX_DCCP_SPECIFIC_HEADER + MAX_HEADER) | ||
49 | 71 | ||
50 | #define DCCP_TIMEWAIT_LEN (60 * HZ) /* how long to wait to destroy TIME-WAIT | 72 | #define DCCP_TIMEWAIT_LEN (60 * HZ) /* how long to wait to destroy TIME-WAIT |
51 | * state, about 60 seconds */ | 73 | * state, about 60 seconds */ |
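The option-length bound derived in the comment above is plain arithmetic; a throwaway user-space check of the derivation (not kernel code):

#include <assert.h>

int main(void)
{
	const int max_total_hdr = 255 * 4;	/* 8-bit Data Offset counts 32-bit words */
	const int max_fixed_hdr = 28;		/* Response/Reset with ACK subheader     */

	assert(max_total_hdr == 1020);
	assert(max_total_hdr - max_fixed_hdr == 992);	/* DCCP_MAX_OPT_LEN */
	return 0;
}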
@@ -58,6 +80,20 @@ extern void dccp_time_wait(struct sock *sk, int state, int timeo); | |||
58 | 80 | ||
59 | #define DCCP_RTO_MAX ((unsigned)(120 * HZ)) /* FIXME: using TCP value */ | 81 | #define DCCP_RTO_MAX ((unsigned)(120 * HZ)) /* FIXME: using TCP value */ |
60 | 82 | ||
83 | #define DCCP_XMIT_TIMEO 30000 /* Time/msecs for blocking transmit per packet */ | ||
84 | |||
85 | /* sysctl variables for DCCP */ | ||
86 | extern int sysctl_dccp_request_retries; | ||
87 | extern int sysctl_dccp_retries1; | ||
88 | extern int sysctl_dccp_retries2; | ||
89 | extern int sysctl_dccp_feat_sequence_window; | ||
90 | extern int sysctl_dccp_feat_rx_ccid; | ||
91 | extern int sysctl_dccp_feat_tx_ccid; | ||
92 | extern int sysctl_dccp_feat_ack_ratio; | ||
93 | extern int sysctl_dccp_feat_send_ack_vector; | ||
94 | extern int sysctl_dccp_feat_send_ndp_count; | ||
95 | extern int sysctl_dccp_tx_qlen; | ||
96 | |||
61 | /* is seq1 < seq2 ? */ | 97 | /* is seq1 < seq2 ? */ |
62 | static inline int before48(const u64 seq1, const u64 seq2) | 98 | static inline int before48(const u64 seq1, const u64 seq2) |
63 | { | 99 | { |
@@ -123,10 +159,36 @@ DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics); | |||
123 | #define DCCP_ADD_STATS_USER(field, val) \ | 159 | #define DCCP_ADD_STATS_USER(field, val) \ |
124 | SNMP_ADD_STATS_USER(dccp_statistics, field, val) | 160 | SNMP_ADD_STATS_USER(dccp_statistics, field, val) |
125 | 161 | ||
162 | /* | ||
163 | * Checksumming routines | ||
164 | */ | ||
165 | static inline int dccp_csum_coverage(const struct sk_buff *skb) | ||
166 | { | ||
167 | const struct dccp_hdr* dh = dccp_hdr(skb); | ||
168 | |||
169 | if (dh->dccph_cscov == 0) | ||
170 | return skb->len; | ||
171 | return (dh->dccph_doff + dh->dccph_cscov - 1) * sizeof(u32); | ||
172 | } | ||
173 | |||
174 | static inline void dccp_csum_outgoing(struct sk_buff *skb) | ||
175 | { | ||
176 | int cov = dccp_csum_coverage(skb); | ||
177 | |||
178 | if (cov >= skb->len) | ||
179 | dccp_hdr(skb)->dccph_cscov = 0; | ||
180 | |||
181 | skb->csum = skb_checksum(skb, 0, (cov > skb->len)? skb->len : cov, 0); | ||
182 | } | ||
183 | |||
184 | extern void dccp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb); | ||
185 | |||
126 | extern int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb); | 186 | extern int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb); |
127 | 187 | ||
128 | extern void dccp_send_ack(struct sock *sk); | 188 | extern void dccp_send_ack(struct sock *sk); |
129 | extern void dccp_send_delayed_ack(struct sock *sk); | 189 | extern void dccp_send_delayed_ack(struct sock *sk); |
190 | extern void dccp_reqsk_send_ack(struct sk_buff *sk, struct request_sock *rsk); | ||
191 | |||
130 | extern void dccp_send_sync(struct sock *sk, const u64 seq, | 192 | extern void dccp_send_sync(struct sock *sk, const u64 seq, |
131 | const enum dccp_pkt_type pkt_type); | 193 | const enum dccp_pkt_type pkt_type); |
132 | 194 | ||
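dccp_csum_coverage() above implements the CsCov rule of RFC 4340: a CsCov of 0 covers the whole packet, any other value covers the header plus the first (CsCov - 1) * 4 bytes of payload. A stand-alone sketch of the same arithmetic with invented header values:

#include <stdio.h>

/* Same arithmetic as dccp_csum_coverage(), on made-up values.      */
static int csum_coverage(int skb_len, int doff, int cscov)
{
	if (cscov == 0)
		return skb_len;			/* checksum the whole packet */
	return (doff + cscov - 1) * 4;		/* header + partial payload  */
}

int main(void)
{
	printf("%d\n", csum_coverage(1000, 7, 0));	/* 1000: full coverage   */
	printf("%d\n", csum_coverage(1000, 7, 3));	/* 36 bytes covered      */
	return 0;
}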
@@ -147,18 +209,7 @@ extern const char *dccp_state_name(const int state); | |||
147 | extern void dccp_set_state(struct sock *sk, const int state); | 209 | extern void dccp_set_state(struct sock *sk, const int state); |
148 | extern void dccp_done(struct sock *sk); | 210 | extern void dccp_done(struct sock *sk); |
149 | 211 | ||
150 | static inline void dccp_openreq_init(struct request_sock *req, | 212 | extern void dccp_reqsk_init(struct request_sock *req, struct sk_buff *skb); |
151 | struct dccp_sock *dp, | ||
152 | struct sk_buff *skb) | ||
153 | { | ||
154 | /* | ||
155 | * FIXME: fill in the other req fields from the DCCP options | ||
156 | * received | ||
157 | */ | ||
158 | inet_rsk(req)->rmt_port = dccp_hdr(skb)->dccph_sport; | ||
159 | inet_rsk(req)->acked = 0; | ||
160 | req->rcv_wnd = 0; | ||
161 | } | ||
162 | 213 | ||
163 | extern int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb); | 214 | extern int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb); |
164 | 215 | ||
@@ -217,14 +268,9 @@ extern void dccp_shutdown(struct sock *sk, int how); | |||
217 | extern int inet_dccp_listen(struct socket *sock, int backlog); | 268 | extern int inet_dccp_listen(struct socket *sock, int backlog); |
218 | extern unsigned int dccp_poll(struct file *file, struct socket *sock, | 269 | extern unsigned int dccp_poll(struct file *file, struct socket *sock, |
219 | poll_table *wait); | 270 | poll_table *wait); |
220 | extern void dccp_v4_send_check(struct sock *sk, int len, | ||
221 | struct sk_buff *skb); | ||
222 | extern int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, | 271 | extern int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, |
223 | int addr_len); | 272 | int addr_len); |
224 | 273 | ||
225 | extern int dccp_v4_checksum(const struct sk_buff *skb, | ||
226 | const __be32 saddr, const __be32 daddr); | ||
227 | |||
228 | extern int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code); | 274 | extern int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code); |
229 | extern void dccp_send_close(struct sock *sk, const int active); | 275 | extern void dccp_send_close(struct sock *sk, const int active); |
230 | extern int dccp_invalid_packet(struct sk_buff *skb); | 276 | extern int dccp_invalid_packet(struct sk_buff *skb); |
diff --git a/net/dccp/feat.c b/net/dccp/feat.c index a1b0682ee77c..4dc487f27a1f 100644 --- a/net/dccp/feat.c +++ b/net/dccp/feat.c | |||
@@ -12,7 +12,6 @@ | |||
12 | 12 | ||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | 14 | ||
15 | #include "dccp.h" | ||
16 | #include "ccid.h" | 15 | #include "ccid.h" |
17 | #include "feat.h" | 16 | #include "feat.h" |
18 | 17 | ||
@@ -23,9 +22,17 @@ int dccp_feat_change(struct dccp_minisock *dmsk, u8 type, u8 feature, | |||
23 | { | 22 | { |
24 | struct dccp_opt_pend *opt; | 23 | struct dccp_opt_pend *opt; |
25 | 24 | ||
26 | dccp_pr_debug("feat change type=%d feat=%d\n", type, feature); | 25 | dccp_feat_debug(type, feature, *val); |
27 | 26 | ||
28 | /* XXX sanity check feat change request */ | 27 | if (!dccp_feat_is_valid_type(type)) { |
28 | DCCP_WARN("option type %d invalid in negotiation\n", type); | ||
29 | return 1; | ||
30 | } | ||
31 | if (!dccp_feat_is_valid_length(type, feature, len)) { | ||
32 | DCCP_WARN("invalid length %d\n", len); | ||
33 | return 1; | ||
34 | } | ||
35 | /* XXX add further sanity checks */ | ||
29 | 36 | ||
30 | /* check if that feature is already being negotiated */ | 37 | /* check if that feature is already being negotiated */ |
31 | list_for_each_entry(opt, &dmsk->dccpms_pending, dccpop_node) { | 38 | list_for_each_entry(opt, &dmsk->dccpms_pending, dccpop_node) { |
@@ -95,14 +102,14 @@ static int dccp_feat_update_ccid(struct sock *sk, u8 type, u8 new_ccid_nr) | |||
95 | /* XXX taking only u8 vals */ | 102 | /* XXX taking only u8 vals */ |
96 | static int dccp_feat_update(struct sock *sk, u8 type, u8 feat, u8 val) | 103 | static int dccp_feat_update(struct sock *sk, u8 type, u8 feat, u8 val) |
97 | { | 104 | { |
98 | dccp_pr_debug("changing [%d] feat %d to %d\n", type, feat, val); | 105 | dccp_feat_debug(type, feat, val); |
99 | 106 | ||
100 | switch (feat) { | 107 | switch (feat) { |
101 | case DCCPF_CCID: | 108 | case DCCPF_CCID: |
102 | return dccp_feat_update_ccid(sk, type, val); | 109 | return dccp_feat_update_ccid(sk, type, val); |
103 | default: | 110 | default: |
104 | dccp_pr_debug("IMPLEMENT changing [%d] feat %d to %d\n", | 111 | dccp_pr_debug("UNIMPLEMENTED: %s(%d, ...)\n", |
105 | type, feat, val); | 112 | dccp_feat_typename(type), feat); |
106 | break; | 113 | break; |
107 | } | 114 | } |
108 | return 0; | 115 | return 0; |
@@ -162,7 +169,8 @@ static int dccp_feat_reconcile(struct sock *sk, struct dccp_opt_pend *opt, | |||
162 | break; | 169 | break; |
163 | 170 | ||
164 | default: | 171 | default: |
165 | WARN_ON(1); /* XXX implement res */ | 172 | DCCP_BUG("Fell through, feat=%d", opt->dccpop_feat); |
173 | /* XXX implement res */ | ||
166 | return -EFAULT; | 174 | return -EFAULT; |
167 | } | 175 | } |
168 | 176 | ||
@@ -265,10 +273,10 @@ static int dccp_feat_nn(struct sock *sk, u8 type, u8 feature, u8 *val, u8 len) | |||
265 | u8 *copy; | 273 | u8 *copy; |
266 | int rc; | 274 | int rc; |
267 | 275 | ||
268 | /* NN features must be change L */ | 276 | /* NN features must be Change L (sec. 6.3.2) */ |
269 | if (type == DCCPO_CHANGE_R) { | 277 | if (type != DCCPO_CHANGE_L) { |
270 | dccp_pr_debug("received CHANGE_R %d for NN feat %d\n", | 278 | dccp_pr_debug("received %s for NN feature %d\n", |
271 | type, feature); | 279 | dccp_feat_typename(type), feature); |
272 | return -EFAULT; | 280 | return -EFAULT; |
273 | } | 281 | } |
274 | 282 | ||
@@ -279,12 +287,11 @@ static int dccp_feat_nn(struct sock *sk, u8 type, u8 feature, u8 *val, u8 len) | |||
279 | if (opt == NULL) | 287 | if (opt == NULL) |
280 | return -ENOMEM; | 288 | return -ENOMEM; |
281 | 289 | ||
282 | copy = kmalloc(len, GFP_ATOMIC); | 290 | copy = kmemdup(val, len, GFP_ATOMIC); |
283 | if (copy == NULL) { | 291 | if (copy == NULL) { |
284 | kfree(opt); | 292 | kfree(opt); |
285 | return -ENOMEM; | 293 | return -ENOMEM; |
286 | } | 294 | } |
287 | memcpy(copy, val, len); | ||
288 | 295 | ||
289 | opt->dccpop_type = DCCPO_CONFIRM_R; /* NN can only confirm R */ | 296 | opt->dccpop_type = DCCPO_CONFIRM_R; /* NN can only confirm R */ |
290 | opt->dccpop_feat = feature; | 297 | opt->dccpop_feat = feature; |
@@ -299,7 +306,8 @@ static int dccp_feat_nn(struct sock *sk, u8 type, u8 feature, u8 *val, u8 len) | |||
299 | return rc; | 306 | return rc; |
300 | } | 307 | } |
301 | 308 | ||
302 | dccp_pr_debug("Confirming NN feature %d (val=%d)\n", feature, *copy); | 309 | dccp_feat_debug(type, feature, *copy); |
310 | |||
303 | list_add_tail(&opt->dccpop_node, &dmsk->dccpms_conf); | 311 | list_add_tail(&opt->dccpop_node, &dmsk->dccpms_conf); |
304 | 312 | ||
305 | return 0; | 313 | return 0; |
@@ -318,14 +326,19 @@ static void dccp_feat_empty_confirm(struct dccp_minisock *dmsk, | |||
318 | return; | 326 | return; |
319 | } | 327 | } |
320 | 328 | ||
321 | opt->dccpop_type = type == DCCPO_CHANGE_L ? DCCPO_CONFIRM_R : | 329 | switch (type) { |
322 | DCCPO_CONFIRM_L; | 330 | case DCCPO_CHANGE_L: opt->dccpop_type = DCCPO_CONFIRM_R; break; |
331 | case DCCPO_CHANGE_R: opt->dccpop_type = DCCPO_CONFIRM_L; break; | ||
332 | default: DCCP_WARN("invalid type %d\n", type); return; | ||
333 | |||
334 | } | ||
323 | opt->dccpop_feat = feature; | 335 | opt->dccpop_feat = feature; |
324 | opt->dccpop_val = NULL; | 336 | opt->dccpop_val = NULL; |
325 | opt->dccpop_len = 0; | 337 | opt->dccpop_len = 0; |
326 | 338 | ||
327 | /* change feature */ | 339 | /* change feature */ |
328 | dccp_pr_debug("Empty confirm feature %d type %d\n", feature, type); | 340 | dccp_pr_debug("Empty %s(%d)\n", dccp_feat_typename(type), feature); |
341 | |||
329 | list_add_tail(&opt->dccpop_node, &dmsk->dccpms_conf); | 342 | list_add_tail(&opt->dccpop_node, &dmsk->dccpms_conf); |
330 | } | 343 | } |
331 | 344 | ||
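The switch above encodes the answering rule of RFC 4340, sec. 6.2: a Change L is answered with Confirm R, and a Change R with Confirm L. Restated as a tiny stand-alone sketch (the enum values are local placeholders, not the kernel's DCCPO_* numbers):

#include <stdio.h>

enum { CHANGE_L, CONFIRM_L, CHANGE_R, CONFIRM_R, INVALID };

static int confirm_for(int change)
{
	switch (change) {
	case CHANGE_L: return CONFIRM_R;	/* peer confirms from the other side */
	case CHANGE_R: return CONFIRM_L;
	default:       return INVALID;		/* Confirms never trigger a Confirm  */
	}
}

int main(void)
{
	printf("%d %d\n", confirm_for(CHANGE_L), confirm_for(CHANGE_R));
	return 0;
}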
@@ -359,7 +372,7 @@ int dccp_feat_change_recv(struct sock *sk, u8 type, u8 feature, u8 *val, u8 len) | |||
359 | { | 372 | { |
360 | int rc; | 373 | int rc; |
361 | 374 | ||
362 | dccp_pr_debug("got feat change type=%d feat=%d\n", type, feature); | 375 | dccp_feat_debug(type, feature, *val); |
363 | 376 | ||
364 | /* figure out if it's SP or NN feature */ | 377 | /* figure out if it's SP or NN feature */ |
365 | switch (feature) { | 378 | switch (feature) { |
@@ -375,6 +388,8 @@ int dccp_feat_change_recv(struct sock *sk, u8 type, u8 feature, u8 *val, u8 len) | |||
375 | 388 | ||
376 | /* XXX implement other features */ | 389 | /* XXX implement other features */ |
377 | default: | 390 | default: |
391 | dccp_pr_debug("UNIMPLEMENTED: not handling %s(%d, ...)\n", | ||
392 | dccp_feat_typename(type), feature); | ||
378 | rc = -EFAULT; | 393 | rc = -EFAULT; |
379 | break; | 394 | break; |
380 | } | 395 | } |
@@ -403,20 +418,27 @@ int dccp_feat_confirm_recv(struct sock *sk, u8 type, u8 feature, | |||
403 | u8 t; | 418 | u8 t; |
404 | struct dccp_opt_pend *opt; | 419 | struct dccp_opt_pend *opt; |
405 | struct dccp_minisock *dmsk = dccp_msk(sk); | 420 | struct dccp_minisock *dmsk = dccp_msk(sk); |
406 | int rc = 1; | 421 | int found = 0; |
407 | int all_confirmed = 1; | 422 | int all_confirmed = 1; |
408 | 423 | ||
409 | dccp_pr_debug("got feat confirm type=%d feat=%d\n", type, feature); | 424 | dccp_feat_debug(type, feature, *val); |
410 | |||
411 | /* XXX sanity check type & feat */ | ||
412 | 425 | ||
413 | /* locate our change request */ | 426 | /* locate our change request */ |
414 | t = type == DCCPO_CONFIRM_L ? DCCPO_CHANGE_R : DCCPO_CHANGE_L; | 427 | switch (type) { |
428 | case DCCPO_CONFIRM_L: t = DCCPO_CHANGE_R; break; | ||
429 | case DCCPO_CONFIRM_R: t = DCCPO_CHANGE_L; break; | ||
430 | default: DCCP_WARN("invalid type %d\n", type); | ||
431 | return 1; | ||
432 | |||
433 | } | ||
434 | /* XXX sanity check feature value */ | ||
415 | 435 | ||
416 | list_for_each_entry(opt, &dmsk->dccpms_pending, dccpop_node) { | 436 | list_for_each_entry(opt, &dmsk->dccpms_pending, dccpop_node) { |
417 | if (!opt->dccpop_conf && opt->dccpop_type == t && | 437 | if (!opt->dccpop_conf && opt->dccpop_type == t && |
418 | opt->dccpop_feat == feature) { | 438 | opt->dccpop_feat == feature) { |
419 | /* we found it */ | 439 | found = 1; |
440 | dccp_pr_debug("feature %d found\n", opt->dccpop_feat); | ||
441 | |||
420 | /* XXX do sanity check */ | 442 | /* XXX do sanity check */ |
421 | 443 | ||
422 | opt->dccpop_conf = 1; | 444 | opt->dccpop_conf = 1; |
@@ -425,9 +447,7 @@ int dccp_feat_confirm_recv(struct sock *sk, u8 type, u8 feature, | |||
425 | dccp_feat_update(sk, opt->dccpop_type, | 447 | dccp_feat_update(sk, opt->dccpop_type, |
426 | opt->dccpop_feat, *val); | 448 | opt->dccpop_feat, *val); |
427 | 449 | ||
428 | dccp_pr_debug("feat %d type %d confirmed %d\n", | 450 | /* XXX check the return value of dccp_feat_update */ |
429 | feature, type, *val); | ||
430 | rc = 0; | ||
431 | break; | 451 | break; |
432 | } | 452 | } |
433 | 453 | ||
@@ -446,9 +466,9 @@ int dccp_feat_confirm_recv(struct sock *sk, u8 type, u8 feature, | |||
446 | inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); | 466 | inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); |
447 | } | 467 | } |
448 | 468 | ||
449 | if (rc) | 469 | if (!found) |
450 | dccp_pr_debug("feat %d type %d never requested\n", | 470 | dccp_pr_debug("%s(%d, ...) never requested\n", |
451 | feature, type); | 471 | dccp_feat_typename(type), feature); |
452 | return 0; | 472 | return 0; |
453 | } | 473 | } |
454 | 474 | ||
@@ -501,20 +521,18 @@ int dccp_feat_clone(struct sock *oldsk, struct sock *newsk) | |||
501 | list_for_each_entry(opt, &olddmsk->dccpms_pending, dccpop_node) { | 521 | list_for_each_entry(opt, &olddmsk->dccpms_pending, dccpop_node) { |
502 | struct dccp_opt_pend *newopt; | 522 | struct dccp_opt_pend *newopt; |
503 | /* copy the value of the option */ | 523 | /* copy the value of the option */ |
504 | u8 *val = kmalloc(opt->dccpop_len, GFP_ATOMIC); | 524 | u8 *val = kmemdup(opt->dccpop_val, opt->dccpop_len, GFP_ATOMIC); |
505 | 525 | ||
506 | if (val == NULL) | 526 | if (val == NULL) |
507 | goto out_clean; | 527 | goto out_clean; |
508 | memcpy(val, opt->dccpop_val, opt->dccpop_len); | ||
509 | 528 | ||
510 | newopt = kmalloc(sizeof(*newopt), GFP_ATOMIC); | 529 | newopt = kmemdup(opt, sizeof(*newopt), GFP_ATOMIC); |
511 | if (newopt == NULL) { | 530 | if (newopt == NULL) { |
512 | kfree(val); | 531 | kfree(val); |
513 | goto out_clean; | 532 | goto out_clean; |
514 | } | 533 | } |
515 | 534 | ||
516 | /* insert the option */ | 535 | /* insert the option */ |
517 | memcpy(newopt, opt, sizeof(*newopt)); | ||
518 | newopt->dccpop_val = val; | 536 | newopt->dccpop_val = val; |
519 | list_add_tail(&newopt->dccpop_node, &newdmsk->dccpms_pending); | 537 | list_add_tail(&newopt->dccpop_node, &newdmsk->dccpms_pending); |
520 | 538 | ||
@@ -545,10 +563,9 @@ static int __dccp_feat_init(struct dccp_minisock *dmsk, u8 type, u8 feat, | |||
545 | u8 *val, u8 len) | 563 | u8 *val, u8 len) |
546 | { | 564 | { |
547 | int rc = -ENOMEM; | 565 | int rc = -ENOMEM; |
548 | u8 *copy = kmalloc(len, GFP_KERNEL); | 566 | u8 *copy = kmemdup(val, len, GFP_KERNEL); |
549 | 567 | ||
550 | if (copy != NULL) { | 568 | if (copy != NULL) { |
551 | memcpy(copy, val, len); | ||
552 | rc = dccp_feat_change(dmsk, type, feat, copy, len, GFP_KERNEL); | 569 | rc = dccp_feat_change(dmsk, type, feat, copy, len, GFP_KERNEL); |
553 | if (rc) | 570 | if (rc) |
554 | kfree(copy); | 571 | kfree(copy); |
@@ -583,3 +600,45 @@ out: | |||
583 | } | 600 | } |
584 | 601 | ||
585 | EXPORT_SYMBOL_GPL(dccp_feat_init); | 602 | EXPORT_SYMBOL_GPL(dccp_feat_init); |
603 | |||
604 | #ifdef CONFIG_IP_DCCP_DEBUG | ||
605 | const char *dccp_feat_typename(const u8 type) | ||
606 | { | ||
607 | switch(type) { | ||
608 | case DCCPO_CHANGE_L: return("ChangeL"); | ||
609 | case DCCPO_CONFIRM_L: return("ConfirmL"); | ||
610 | case DCCPO_CHANGE_R: return("ChangeR"); | ||
611 | case DCCPO_CONFIRM_R: return("ConfirmR"); | ||
612 | /* the following case must not appear in feature negotiation */ | ||
613 | default: dccp_pr_debug("unknown type %d [BUG!]\n", type); | ||
614 | } | ||
615 | return NULL; | ||
616 | } | ||
617 | |||
618 | EXPORT_SYMBOL_GPL(dccp_feat_typename); | ||
619 | |||
620 | const char *dccp_feat_name(const u8 feat) | ||
621 | { | ||
622 | static const char *feature_names[] = { | ||
623 | [DCCPF_RESERVED] = "Reserved", | ||
624 | [DCCPF_CCID] = "CCID", | ||
625 | [DCCPF_SHORT_SEQNOS] = "Allow Short Seqnos", | ||
626 | [DCCPF_SEQUENCE_WINDOW] = "Sequence Window", | ||
627 | [DCCPF_ECN_INCAPABLE] = "ECN Incapable", | ||
628 | [DCCPF_ACK_RATIO] = "Ack Ratio", | ||
629 | [DCCPF_SEND_ACK_VECTOR] = "Send ACK Vector", | ||
630 | [DCCPF_SEND_NDP_COUNT] = "Send NDP Count", | ||
631 | [DCCPF_MIN_CSUM_COVER] = "Min. Csum Coverage", | ||
632 | [DCCPF_DATA_CHECKSUM] = "Send Data Checksum", | ||
633 | }; | ||
634 | if (feat >= DCCPF_MIN_CCID_SPECIFIC) | ||
635 | return "CCID-specific"; | ||
636 | |||
637 | if (dccp_feat_is_reserved(feat)) | ||
638 | return feature_names[DCCPF_RESERVED]; | ||
639 | |||
640 | return feature_names[feat]; | ||
641 | } | ||
642 | |||
643 | EXPORT_SYMBOL_GPL(dccp_feat_name); | ||
644 | #endif /* CONFIG_IP_DCCP_DEBUG */ | ||
diff --git a/net/dccp/feat.h b/net/dccp/feat.h index cee553d416ca..2c373ad7edcf 100644 --- a/net/dccp/feat.h +++ b/net/dccp/feat.h | |||
@@ -12,9 +12,46 @@ | |||
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
15 | #include "dccp.h" | ||
15 | 16 | ||
16 | struct sock; | 17 | static inline int dccp_feat_is_valid_length(u8 type, u8 feature, u8 len) |
17 | struct dccp_minisock; | 18 | { |
19 | /* sec. 6.1: Confirm has at least length 3, | ||
20 | * sec. 6.2: Change has at least length 4 */ | ||
21 | if (len < 3) | ||
22 | return 1; | ||
23 | if (len < 4 && (type == DCCPO_CHANGE_L || type == DCCPO_CHANGE_R)) | ||
24 | return 1; | ||
25 | /* XXX: add per-feature length validation (sec. 6.6.8) */ | ||
26 | return 0; | ||
27 | } | ||
28 | |||
29 | static inline int dccp_feat_is_reserved(const u8 feat) | ||
30 | { | ||
31 | return (feat > DCCPF_DATA_CHECKSUM && | ||
32 | feat < DCCPF_MIN_CCID_SPECIFIC) || | ||
33 | feat == DCCPF_RESERVED; | ||
34 | } | ||
35 | |||
36 | /* feature negotiation knows only these four option types (RFC 4340, sec. 6) */ | ||
37 | static inline int dccp_feat_is_valid_type(const u8 optnum) | ||
38 | { | ||
39 | return optnum >= DCCPO_CHANGE_L && optnum <= DCCPO_CONFIRM_R; | ||
40 | |||
41 | } | ||
42 | |||
43 | #ifdef CONFIG_IP_DCCP_DEBUG | ||
44 | extern const char *dccp_feat_typename(const u8 type); | ||
45 | extern const char *dccp_feat_name(const u8 feat); | ||
46 | |||
47 | static inline void dccp_feat_debug(const u8 type, const u8 feat, const u8 val) | ||
48 | { | ||
49 | dccp_pr_debug("%s(%s (%d), %d)\n", dccp_feat_typename(type), | ||
50 | dccp_feat_name(feat), feat, val); | ||
51 | } | ||
52 | #else | ||
53 | #define dccp_feat_debug(type, feat, val) | ||
54 | #endif /* CONFIG_IP_DCCP_DEBUG */ | ||
18 | 55 | ||
19 | extern int dccp_feat_change(struct dccp_minisock *dmsk, u8 type, u8 feature, | 56 | extern int dccp_feat_change(struct dccp_minisock *dmsk, u8 type, u8 feature, |
20 | u8 *val, u8 len, gfp_t gfp); | 57 | u8 *val, u8 len, gfp_t gfp); |
@@ -26,11 +63,4 @@ extern void dccp_feat_clean(struct dccp_minisock *dmsk); | |||
26 | extern int dccp_feat_clone(struct sock *oldsk, struct sock *newsk); | 63 | extern int dccp_feat_clone(struct sock *oldsk, struct sock *newsk); |
27 | extern int dccp_feat_init(struct dccp_minisock *dmsk); | 64 | extern int dccp_feat_init(struct dccp_minisock *dmsk); |
28 | 65 | ||
29 | extern int dccp_feat_default_sequence_window; | ||
30 | extern int dccp_feat_default_rx_ccid; | ||
31 | extern int dccp_feat_default_tx_ccid; | ||
32 | extern int dccp_feat_default_ack_ratio; | ||
33 | extern int dccp_feat_default_send_ack_vector; | ||
34 | extern int dccp_feat_default_send_ndp_count; | ||
35 | |||
36 | #endif /* _DCCP_FEAT_H */ | 66 | #endif /* _DCCP_FEAT_H */ |
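The new inline helpers above encode the RFC 4340 feature-negotiation constraints: only the Change L/R and Confirm L/R option types are valid, Confirm options must be at least 3 bytes and Change options at least 4, and reserved feature numbers are rejected. A hedged sketch of a caller using them; this check_feat_option() is hypothetical (not the kernel's dccp_parse_options()) and assumes the feat.h helpers and the kernel u8 type are in scope:

    /* Hypothetical option-validation step built on the feat.h helpers above.
     * Note dccp_feat_is_valid_length() returns 1 when the length is *invalid*. */
    static int check_feat_option(u8 type, u8 feature, u8 len)
    {
            if (!dccp_feat_is_valid_type(type))
                    return -1;      /* not ChangeL/R or ConfirmL/R */
            if (dccp_feat_is_valid_length(type, feature, len))
                    return -1;      /* shorter than RFC 4340, sec. 6.1/6.2 allows */
            if (dccp_feat_is_reserved(feature))
                    return -1;      /* reserved feature number */
            return 0;               /* structurally valid; negotiated in later steps */
    }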
diff --git a/net/dccp/input.c b/net/dccp/input.c index 1d24881ac0ab..7371a2f3acf4 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c | |||
@@ -128,21 +128,18 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb) | |||
128 | DCCP_PKT_WITHOUT_ACK_SEQ)) | 128 | DCCP_PKT_WITHOUT_ACK_SEQ)) |
129 | dp->dccps_gar = DCCP_SKB_CB(skb)->dccpd_ack_seq; | 129 | dp->dccps_gar = DCCP_SKB_CB(skb)->dccpd_ack_seq; |
130 | } else { | 130 | } else { |
131 | LIMIT_NETDEBUG(KERN_WARNING "DCCP: Step 6 failed for %s packet, " | 131 | DCCP_WARN("DCCP: Step 6 failed for %s packet, " |
132 | "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and " | 132 | "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and " |
133 | "(P.ackno %s or LAWL(%llu) <= P.ackno(%llu) <= S.AWH(%llu), " | 133 | "(P.ackno %s or LAWL(%llu) <= P.ackno(%llu) <= S.AWH(%llu), " |
134 | "sending SYNC...\n", | 134 | "sending SYNC...\n", dccp_packet_name(dh->dccph_type), |
135 | dccp_packet_name(dh->dccph_type), | 135 | (unsigned long long) lswl, |
136 | (unsigned long long) lswl, | 136 | (unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq, |
137 | (unsigned long long) | 137 | (unsigned long long) dp->dccps_swh, |
138 | DCCP_SKB_CB(skb)->dccpd_seq, | 138 | (DCCP_SKB_CB(skb)->dccpd_ack_seq == |
139 | (unsigned long long) dp->dccps_swh, | ||
140 | (DCCP_SKB_CB(skb)->dccpd_ack_seq == | ||
141 | DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist" : "exists", | 139 | DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist" : "exists", |
142 | (unsigned long long) lawl, | 140 | (unsigned long long) lawl, |
143 | (unsigned long long) | 141 | (unsigned long long) DCCP_SKB_CB(skb)->dccpd_ack_seq, |
144 | DCCP_SKB_CB(skb)->dccpd_ack_seq, | 142 | (unsigned long long) dp->dccps_awh); |
145 | (unsigned long long) dp->dccps_awh); | ||
146 | dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC); | 143 | dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC); |
147 | return -1; | 144 | return -1; |
148 | } | 145 | } |
@@ -431,29 +428,25 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
431 | 428 | ||
432 | /* | 429 | /* |
433 | * Step 3: Process LISTEN state | 430 | * Step 3: Process LISTEN state |
434 | * (Continuing from dccp_v4_do_rcv and dccp_v6_do_rcv) | ||
435 | * | 431 | * |
436 | * If S.state == LISTEN, | 432 | * If S.state == LISTEN, |
437 | * If P.type == Request or P contains a valid Init Cookie | 433 | * If P.type == Request or P contains a valid Init Cookie option, |
438 | * option, | 434 | * (* Must scan the packet's options to check for Init |
439 | * * Must scan the packet's options to check for an Init | 435 | * Cookies. Only Init Cookies are processed here, |
440 | * Cookie. Only the Init Cookie is processed here, | 436 | * however; other options are processed in Step 8. This |
441 | * however; other options are processed in Step 8. This | 437 | * scan need only be performed if the endpoint uses Init |
442 | * scan need only be performed if the endpoint uses Init | 438 | * Cookies *) |
443 | * Cookies * | 439 | * (* Generate a new socket and switch to that socket *) |
444 | * * Generate a new socket and switch to that socket * | 440 | * Set S := new socket for this port pair |
445 | * Set S := new socket for this port pair | 441 | * S.state = RESPOND |
446 | * S.state = RESPOND | 442 | * Choose S.ISS (initial seqno) or set from Init Cookies |
447 | * Choose S.ISS (initial seqno) or set from Init Cookie | 443 | * Initialize S.GAR := S.ISS |
448 | * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie | 444 | * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init |
449 | * Continue with S.state == RESPOND | 445 | * Cookies Continue with S.state == RESPOND |
450 | * * A Response packet will be generated in Step 11 * | 446 | * (* A Response packet will be generated in Step 11 *) |
451 | * Otherwise, | 447 | * Otherwise, |
452 | * Generate Reset(No Connection) unless P.type == Reset | 448 | * Generate Reset(No Connection) unless P.type == Reset |
453 | * Drop packet and return | 449 | * Drop packet and return |
454 | * | ||
455 | * NOTE: the check for the packet types is done in | ||
456 | * dccp_rcv_state_process | ||
457 | */ | 450 | */ |
458 | if (sk->sk_state == DCCP_LISTEN) { | 451 | if (sk->sk_state == DCCP_LISTEN) { |
459 | if (dh->dccph_type == DCCP_PKT_REQUEST) { | 452 | if (dh->dccph_type == DCCP_PKT_REQUEST) { |
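The condensed Step 3 comment above describes the LISTEN-state handling that follows it. A simplified illustration of that branch, assuming the surrounding kernel context (sk, skb, dh, dcb and the discard label); it compresses the dccp_rcv_state_process() logic and is not a literal excerpt:

    /* Simplified Step-3 sketch: a Request spawns a RESPOND-state child via
     * the address-family conn_request handler; a Reset is dropped; anything
     * else makes the caller generate Reset(No Connection). */
    if (sk->sk_state == DCCP_LISTEN) {
            if (dh->dccph_type == DCCP_PKT_REQUEST) {
                    if (inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) < 0)
                            return 1;           /* refused; caller sends Reset */
                    goto discard;               /* Response follows in Step 11 */
            }
            if (dh->dccph_type == DCCP_PKT_RESET)
                    goto discard;               /* never reply to a Reset */
            /* otherwise: caller generates Reset(No Connection) */
            dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
            return 1;
    }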
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index e08e7688a263..ff81679c9f17 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c | |||
@@ -113,13 +113,8 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
113 | /* OK, now commit destination to socket. */ | 113 | /* OK, now commit destination to socket. */ |
114 | sk_setup_caps(sk, &rt->u.dst); | 114 | sk_setup_caps(sk, &rt->u.dst); |
115 | 115 | ||
116 | dp->dccps_gar = | 116 | dp->dccps_iss = secure_dccp_sequence_number(inet->saddr, inet->daddr, |
117 | dp->dccps_iss = secure_dccp_sequence_number(inet->saddr, | 117 | inet->sport, inet->dport); |
118 | inet->daddr, | ||
119 | inet->sport, | ||
120 | usin->sin_port); | ||
121 | dccp_update_gss(sk, dp->dccps_iss); | ||
122 | |||
123 | inet->id = dp->dccps_iss ^ jiffies; | 118 | inet->id = dp->dccps_iss ^ jiffies; |
124 | 119 | ||
125 | err = dccp_connect(sk); | 120 | err = dccp_connect(sk); |
@@ -193,86 +188,6 @@ static inline void dccp_do_pmtu_discovery(struct sock *sk, | |||
193 | } /* else let the usual retransmit timer handle it */ | 188 | } /* else let the usual retransmit timer handle it */ |
194 | } | 189 | } |
195 | 190 | ||
196 | static void dccp_v4_reqsk_send_ack(struct sk_buff *rxskb, | ||
197 | struct request_sock *req) | ||
198 | { | ||
199 | int err; | ||
200 | struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh; | ||
201 | const u32 dccp_hdr_ack_len = sizeof(struct dccp_hdr) + | ||
202 | sizeof(struct dccp_hdr_ext) + | ||
203 | sizeof(struct dccp_hdr_ack_bits); | ||
204 | struct sk_buff *skb; | ||
205 | |||
206 | if (((struct rtable *)rxskb->dst)->rt_type != RTN_LOCAL) | ||
207 | return; | ||
208 | |||
209 | skb = alloc_skb(dccp_v4_ctl_socket->sk->sk_prot->max_header, GFP_ATOMIC); | ||
210 | if (skb == NULL) | ||
211 | return; | ||
212 | |||
213 | /* Reserve space for headers. */ | ||
214 | skb_reserve(skb, dccp_v4_ctl_socket->sk->sk_prot->max_header); | ||
215 | |||
216 | skb->dst = dst_clone(rxskb->dst); | ||
217 | |||
218 | skb->h.raw = skb_push(skb, dccp_hdr_ack_len); | ||
219 | dh = dccp_hdr(skb); | ||
220 | memset(dh, 0, dccp_hdr_ack_len); | ||
221 | |||
222 | /* Build DCCP header and checksum it. */ | ||
223 | dh->dccph_type = DCCP_PKT_ACK; | ||
224 | dh->dccph_sport = rxdh->dccph_dport; | ||
225 | dh->dccph_dport = rxdh->dccph_sport; | ||
226 | dh->dccph_doff = dccp_hdr_ack_len / 4; | ||
227 | dh->dccph_x = 1; | ||
228 | |||
229 | dccp_hdr_set_seq(dh, DCCP_SKB_CB(rxskb)->dccpd_ack_seq); | ||
230 | dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), | ||
231 | DCCP_SKB_CB(rxskb)->dccpd_seq); | ||
232 | |||
233 | bh_lock_sock(dccp_v4_ctl_socket->sk); | ||
234 | err = ip_build_and_send_pkt(skb, dccp_v4_ctl_socket->sk, | ||
235 | rxskb->nh.iph->daddr, | ||
236 | rxskb->nh.iph->saddr, NULL); | ||
237 | bh_unlock_sock(dccp_v4_ctl_socket->sk); | ||
238 | |||
239 | if (err == NET_XMIT_CN || err == 0) { | ||
240 | DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); | ||
241 | DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); | ||
242 | } | ||
243 | } | ||
244 | |||
245 | static int dccp_v4_send_response(struct sock *sk, struct request_sock *req, | ||
246 | struct dst_entry *dst) | ||
247 | { | ||
248 | int err = -1; | ||
249 | struct sk_buff *skb; | ||
250 | |||
251 | /* First, grab a route. */ | ||
252 | |||
253 | if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL) | ||
254 | goto out; | ||
255 | |||
256 | skb = dccp_make_response(sk, dst, req); | ||
257 | if (skb != NULL) { | ||
258 | const struct inet_request_sock *ireq = inet_rsk(req); | ||
259 | struct dccp_hdr *dh = dccp_hdr(skb); | ||
260 | |||
261 | dh->dccph_checksum = dccp_v4_checksum(skb, ireq->loc_addr, | ||
262 | ireq->rmt_addr); | ||
263 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | ||
264 | err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, | ||
265 | ireq->rmt_addr, | ||
266 | ireq->opt); | ||
267 | if (err == NET_XMIT_CN) | ||
268 | err = 0; | ||
269 | } | ||
270 | |||
271 | out: | ||
272 | dst_release(dst); | ||
273 | return err; | ||
274 | } | ||
275 | |||
276 | /* | 191 | /* |
277 | * This routine is called by the ICMP module when it gets some sort of error | 192 | * This routine is called by the ICMP module when it gets some sort of error |
278 | * condition. If err < 0 then the socket should be closed and the error | 193 | * condition. If err < 0 then the socket should be closed and the error |
@@ -329,7 +244,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) | |||
329 | seq = dccp_hdr_seq(skb); | 244 | seq = dccp_hdr_seq(skb); |
330 | if (sk->sk_state != DCCP_LISTEN && | 245 | if (sk->sk_state != DCCP_LISTEN && |
331 | !between48(seq, dp->dccps_swl, dp->dccps_swh)) { | 246 | !between48(seq, dp->dccps_swl, dp->dccps_swh)) { |
332 | NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS); | 247 | NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); |
333 | goto out; | 248 | goto out; |
334 | } | 249 | } |
335 | 250 | ||
@@ -429,19 +344,24 @@ out: | |||
429 | sock_put(sk); | 344 | sock_put(sk); |
430 | } | 345 | } |
431 | 346 | ||
432 | /* This routine computes an IPv4 DCCP checksum. */ | 347 | static inline __sum16 dccp_v4_csum_finish(struct sk_buff *skb, |
433 | void dccp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb) | 348 | __be32 src, __be32 dst) |
349 | { | ||
350 | return csum_tcpudp_magic(src, dst, skb->len, IPPROTO_DCCP, skb->csum); | ||
351 | } | ||
352 | |||
353 | void dccp_v4_send_check(struct sock *sk, int unused, struct sk_buff *skb) | ||
434 | { | 354 | { |
435 | const struct inet_sock *inet = inet_sk(sk); | 355 | const struct inet_sock *inet = inet_sk(sk); |
436 | struct dccp_hdr *dh = dccp_hdr(skb); | 356 | struct dccp_hdr *dh = dccp_hdr(skb); |
437 | 357 | ||
438 | dh->dccph_checksum = dccp_v4_checksum(skb, inet->saddr, inet->daddr); | 358 | dccp_csum_outgoing(skb); |
359 | dh->dccph_checksum = dccp_v4_csum_finish(skb, inet->saddr, inet->daddr); | ||
439 | } | 360 | } |
440 | 361 | ||
441 | EXPORT_SYMBOL_GPL(dccp_v4_send_check); | 362 | EXPORT_SYMBOL_GPL(dccp_v4_send_check); |
442 | 363 | ||
443 | static inline u64 dccp_v4_init_sequence(const struct sock *sk, | 364 | static inline u64 dccp_v4_init_sequence(const struct sk_buff *skb) |
444 | const struct sk_buff *skb) | ||
445 | { | 365 | { |
446 | return secure_dccp_sequence_number(skb->nh.iph->daddr, | 366 | return secure_dccp_sequence_number(skb->nh.iph->daddr, |
447 | skb->nh.iph->saddr, | 367 | skb->nh.iph->saddr, |
@@ -449,95 +369,6 @@ static inline u64 dccp_v4_init_sequence(const struct sock *sk, | |||
449 | dccp_hdr(skb)->dccph_sport); | 369 | dccp_hdr(skb)->dccph_sport); |
450 | } | 370 | } |
451 | 371 | ||
452 | static struct request_sock_ops dccp_request_sock_ops; | ||
453 | |||
454 | int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | ||
455 | { | ||
456 | struct inet_request_sock *ireq; | ||
457 | struct dccp_sock dp; | ||
458 | struct request_sock *req; | ||
459 | struct dccp_request_sock *dreq; | ||
460 | const __be32 saddr = skb->nh.iph->saddr; | ||
461 | const __be32 daddr = skb->nh.iph->daddr; | ||
462 | const __be32 service = dccp_hdr_request(skb)->dccph_req_service; | ||
463 | struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); | ||
464 | __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY; | ||
465 | |||
466 | /* Never answer to DCCP_PKT_REQUESTs send to broadcast or multicast */ | ||
467 | if (((struct rtable *)skb->dst)->rt_flags & | ||
468 | (RTCF_BROADCAST | RTCF_MULTICAST)) { | ||
469 | reset_code = DCCP_RESET_CODE_NO_CONNECTION; | ||
470 | goto drop; | ||
471 | } | ||
472 | |||
473 | if (dccp_bad_service_code(sk, service)) { | ||
474 | reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE; | ||
475 | goto drop; | ||
476 | } | ||
477 | /* | ||
478 | * TW buckets are converted to open requests without | ||
479 | * limitations, they conserve resources and peer is | ||
480 | * evidently real one. | ||
481 | */ | ||
482 | if (inet_csk_reqsk_queue_is_full(sk)) | ||
483 | goto drop; | ||
484 | |||
485 | /* | ||
486 | * Accept backlog is full. If we have already queued enough | ||
487 | * of warm entries in syn queue, drop request. It is better than | ||
488 | * clogging syn queue with openreqs with exponentially increasing | ||
489 | * timeout. | ||
490 | */ | ||
491 | if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) | ||
492 | goto drop; | ||
493 | |||
494 | req = reqsk_alloc(&dccp_request_sock_ops); | ||
495 | if (req == NULL) | ||
496 | goto drop; | ||
497 | |||
498 | if (dccp_parse_options(sk, skb)) | ||
499 | goto drop_and_free; | ||
500 | |||
501 | dccp_openreq_init(req, &dp, skb); | ||
502 | |||
503 | if (security_inet_conn_request(sk, skb, req)) | ||
504 | goto drop_and_free; | ||
505 | |||
506 | ireq = inet_rsk(req); | ||
507 | ireq->loc_addr = daddr; | ||
508 | ireq->rmt_addr = saddr; | ||
509 | req->rcv_wnd = dccp_feat_default_sequence_window; | ||
510 | ireq->opt = NULL; | ||
511 | |||
512 | /* | ||
513 | * Step 3: Process LISTEN state | ||
514 | * | ||
515 | * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie | ||
516 | * | ||
517 | * In fact we defer setting S.GSR, S.SWL, S.SWH to | ||
518 | * dccp_create_openreq_child. | ||
519 | */ | ||
520 | dreq = dccp_rsk(req); | ||
521 | dreq->dreq_isr = dcb->dccpd_seq; | ||
522 | dreq->dreq_iss = dccp_v4_init_sequence(sk, skb); | ||
523 | dreq->dreq_service = service; | ||
524 | |||
525 | if (dccp_v4_send_response(sk, req, NULL)) | ||
526 | goto drop_and_free; | ||
527 | |||
528 | inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); | ||
529 | return 0; | ||
530 | |||
531 | drop_and_free: | ||
532 | reqsk_free(req); | ||
533 | drop: | ||
534 | DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS); | ||
535 | dcb->dccpd_reset_code = reset_code; | ||
536 | return -1; | ||
537 | } | ||
538 | |||
539 | EXPORT_SYMBOL_GPL(dccp_v4_conn_request); | ||
540 | |||
541 | /* | 372 | /* |
542 | * The three way handshake has completed - we got a valid ACK or DATAACK - | 373 | * The three way handshake has completed - we got a valid ACK or DATAACK - |
543 | * now create the new socket. | 374 | * now create the new socket. |
@@ -623,47 +454,6 @@ static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) | |||
623 | return sk; | 454 | return sk; |
624 | } | 455 | } |
625 | 456 | ||
626 | int dccp_v4_checksum(const struct sk_buff *skb, const __be32 saddr, | ||
627 | const __be32 daddr) | ||
628 | { | ||
629 | const struct dccp_hdr* dh = dccp_hdr(skb); | ||
630 | int checksum_len; | ||
631 | u32 tmp; | ||
632 | |||
633 | if (dh->dccph_cscov == 0) | ||
634 | checksum_len = skb->len; | ||
635 | else { | ||
636 | checksum_len = (dh->dccph_cscov + dh->dccph_x) * sizeof(u32); | ||
637 | checksum_len = checksum_len < skb->len ? checksum_len : | ||
638 | skb->len; | ||
639 | } | ||
640 | |||
641 | tmp = csum_partial((unsigned char *)dh, checksum_len, 0); | ||
642 | return csum_tcpudp_magic(saddr, daddr, checksum_len, | ||
643 | IPPROTO_DCCP, tmp); | ||
644 | } | ||
645 | |||
646 | EXPORT_SYMBOL_GPL(dccp_v4_checksum); | ||
647 | |||
648 | static int dccp_v4_verify_checksum(struct sk_buff *skb, | ||
649 | const __be32 saddr, const __be32 daddr) | ||
650 | { | ||
651 | struct dccp_hdr *dh = dccp_hdr(skb); | ||
652 | int checksum_len; | ||
653 | u32 tmp; | ||
654 | |||
655 | if (dh->dccph_cscov == 0) | ||
656 | checksum_len = skb->len; | ||
657 | else { | ||
658 | checksum_len = (dh->dccph_cscov + dh->dccph_x) * sizeof(u32); | ||
659 | checksum_len = checksum_len < skb->len ? checksum_len : | ||
660 | skb->len; | ||
661 | } | ||
662 | tmp = csum_partial((unsigned char *)dh, checksum_len, 0); | ||
663 | return csum_tcpudp_magic(saddr, daddr, checksum_len, | ||
664 | IPPROTO_DCCP, tmp) == 0 ? 0 : -1; | ||
665 | } | ||
666 | |||
667 | static struct dst_entry* dccp_v4_route_skb(struct sock *sk, | 457 | static struct dst_entry* dccp_v4_route_skb(struct sock *sk, |
668 | struct sk_buff *skb) | 458 | struct sk_buff *skb) |
669 | { | 459 | { |
@@ -689,7 +479,37 @@ static struct dst_entry* dccp_v4_route_skb(struct sock *sk, | |||
689 | return &rt->u.dst; | 479 | return &rt->u.dst; |
690 | } | 480 | } |
691 | 481 | ||
692 | static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb) | 482 | static int dccp_v4_send_response(struct sock *sk, struct request_sock *req, |
483 | struct dst_entry *dst) | ||
484 | { | ||
485 | int err = -1; | ||
486 | struct sk_buff *skb; | ||
487 | |||
488 | /* First, grab a route. */ | ||
489 | |||
490 | if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL) | ||
491 | goto out; | ||
492 | |||
493 | skb = dccp_make_response(sk, dst, req); | ||
494 | if (skb != NULL) { | ||
495 | const struct inet_request_sock *ireq = inet_rsk(req); | ||
496 | struct dccp_hdr *dh = dccp_hdr(skb); | ||
497 | |||
498 | dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->loc_addr, | ||
499 | ireq->rmt_addr); | ||
500 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | ||
501 | err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, | ||
502 | ireq->rmt_addr, | ||
503 | ireq->opt); | ||
504 | err = net_xmit_eval(err); | ||
505 | } | ||
506 | |||
507 | out: | ||
508 | dst_release(dst); | ||
509 | return err; | ||
510 | } | ||
511 | |||
512 | static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) | ||
693 | { | 513 | { |
694 | int err; | 514 | int err; |
695 | struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh; | 515 | struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh; |
@@ -698,7 +518,7 @@ static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb) | |||
698 | sizeof(struct dccp_hdr_reset); | 518 | sizeof(struct dccp_hdr_reset); |
699 | struct sk_buff *skb; | 519 | struct sk_buff *skb; |
700 | struct dst_entry *dst; | 520 | struct dst_entry *dst; |
701 | u64 seqno; | 521 | u64 seqno = 0; |
702 | 522 | ||
703 | /* Never send a reset in response to a reset. */ | 523 | /* Never send a reset in response to a reset. */ |
704 | if (rxdh->dccph_type == DCCP_PKT_RESET) | 524 | if (rxdh->dccph_type == DCCP_PKT_RESET) |
@@ -720,9 +540,7 @@ static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb) | |||
720 | skb_reserve(skb, dccp_v4_ctl_socket->sk->sk_prot->max_header); | 540 | skb_reserve(skb, dccp_v4_ctl_socket->sk->sk_prot->max_header); |
721 | skb->dst = dst_clone(dst); | 541 | skb->dst = dst_clone(dst); |
722 | 542 | ||
723 | skb->h.raw = skb_push(skb, dccp_hdr_reset_len); | 543 | dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len); |
724 | dh = dccp_hdr(skb); | ||
725 | memset(dh, 0, dccp_hdr_reset_len); | ||
726 | 544 | ||
727 | /* Build DCCP header and checksum it. */ | 545 | /* Build DCCP header and checksum it. */ |
728 | dh->dccph_type = DCCP_PKT_RESET; | 546 | dh->dccph_type = DCCP_PKT_RESET; |
@@ -734,16 +552,15 @@ static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb) | |||
734 | DCCP_SKB_CB(rxskb)->dccpd_reset_code; | 552 | DCCP_SKB_CB(rxskb)->dccpd_reset_code; |
735 | 553 | ||
736 | /* See "8.3.1. Abnormal Termination" in RFC 4340 */ | 554 | /* See "8.3.1. Abnormal Termination" in RFC 4340 */ |
737 | seqno = 0; | ||
738 | if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) | 555 | if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) |
739 | dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1); | 556 | dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1); |
740 | 557 | ||
741 | dccp_hdr_set_seq(dh, seqno); | 558 | dccp_hdr_set_seq(dh, seqno); |
742 | dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), | 559 | dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), DCCP_SKB_CB(rxskb)->dccpd_seq); |
743 | DCCP_SKB_CB(rxskb)->dccpd_seq); | ||
744 | 560 | ||
745 | dh->dccph_checksum = dccp_v4_checksum(skb, rxskb->nh.iph->saddr, | 561 | dccp_csum_outgoing(skb); |
746 | rxskb->nh.iph->daddr); | 562 | dh->dccph_checksum = dccp_v4_csum_finish(skb, rxskb->nh.iph->saddr, |
563 | rxskb->nh.iph->daddr); | ||
747 | 564 | ||
748 | bh_lock_sock(dccp_v4_ctl_socket->sk); | 565 | bh_lock_sock(dccp_v4_ctl_socket->sk); |
749 | err = ip_build_and_send_pkt(skb, dccp_v4_ctl_socket->sk, | 566 | err = ip_build_and_send_pkt(skb, dccp_v4_ctl_socket->sk, |
@@ -751,7 +568,7 @@ static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb) | |||
751 | rxskb->nh.iph->saddr, NULL); | 568 | rxskb->nh.iph->saddr, NULL); |
752 | bh_unlock_sock(dccp_v4_ctl_socket->sk); | 569 | bh_unlock_sock(dccp_v4_ctl_socket->sk); |
753 | 570 | ||
754 | if (err == NET_XMIT_CN || err == 0) { | 571 | if (net_xmit_eval(err) == 0) { |
755 | DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); | 572 | DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); |
756 | DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); | 573 | DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); |
757 | } | 574 | } |
@@ -759,6 +576,103 @@ out: | |||
759 | dst_release(dst); | 576 | dst_release(dst); |
760 | } | 577 | } |
761 | 578 | ||
579 | static void dccp_v4_reqsk_destructor(struct request_sock *req) | ||
580 | { | ||
581 | kfree(inet_rsk(req)->opt); | ||
582 | } | ||
583 | |||
584 | static struct request_sock_ops dccp_request_sock_ops __read_mostly = { | ||
585 | .family = PF_INET, | ||
586 | .obj_size = sizeof(struct dccp_request_sock), | ||
587 | .rtx_syn_ack = dccp_v4_send_response, | ||
588 | .send_ack = dccp_reqsk_send_ack, | ||
589 | .destructor = dccp_v4_reqsk_destructor, | ||
590 | .send_reset = dccp_v4_ctl_send_reset, | ||
591 | }; | ||
592 | |||
593 | int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | ||
594 | { | ||
595 | struct inet_request_sock *ireq; | ||
596 | struct request_sock *req; | ||
597 | struct dccp_request_sock *dreq; | ||
598 | const __be32 service = dccp_hdr_request(skb)->dccph_req_service; | ||
599 | struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); | ||
600 | __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY; | ||
601 | |||
602 | /* Never answer DCCP_PKT_REQUESTs sent to broadcast or multicast */ | ||
603 | if (((struct rtable *)skb->dst)->rt_flags & | ||
604 | (RTCF_BROADCAST | RTCF_MULTICAST)) { | ||
605 | reset_code = DCCP_RESET_CODE_NO_CONNECTION; | ||
606 | goto drop; | ||
607 | } | ||
608 | |||
609 | if (dccp_bad_service_code(sk, service)) { | ||
610 | reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE; | ||
611 | goto drop; | ||
612 | } | ||
613 | /* | ||
614 | * TW buckets are converted to open requests without | ||
615 | * limitations, they conserve resources and the peer is | ||
616 | * evidently a real one. | ||
617 | */ | ||
618 | if (inet_csk_reqsk_queue_is_full(sk)) | ||
619 | goto drop; | ||
620 | |||
621 | /* | ||
622 | * Accept backlog is full. If we have already queued enough | ||
623 | * of warm entries in syn queue, drop request. It is better than | ||
624 | * clogging syn queue with openreqs with exponentially increasing | ||
625 | * timeout. | ||
626 | */ | ||
627 | if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) | ||
628 | goto drop; | ||
629 | |||
630 | req = reqsk_alloc(&dccp_request_sock_ops); | ||
631 | if (req == NULL) | ||
632 | goto drop; | ||
633 | |||
634 | if (dccp_parse_options(sk, skb)) | ||
635 | goto drop_and_free; | ||
636 | |||
637 | dccp_reqsk_init(req, skb); | ||
638 | |||
639 | if (security_inet_conn_request(sk, skb, req)) | ||
640 | goto drop_and_free; | ||
641 | |||
642 | ireq = inet_rsk(req); | ||
643 | ireq->loc_addr = skb->nh.iph->daddr; | ||
644 | ireq->rmt_addr = skb->nh.iph->saddr; | ||
645 | ireq->opt = NULL; | ||
646 | |||
647 | /* | ||
648 | * Step 3: Process LISTEN state | ||
649 | * | ||
650 | * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie | ||
651 | * | ||
652 | * In fact we defer setting S.GSR, S.SWL, S.SWH to | ||
653 | * dccp_create_openreq_child. | ||
654 | */ | ||
655 | dreq = dccp_rsk(req); | ||
656 | dreq->dreq_isr = dcb->dccpd_seq; | ||
657 | dreq->dreq_iss = dccp_v4_init_sequence(skb); | ||
658 | dreq->dreq_service = service; | ||
659 | |||
660 | if (dccp_v4_send_response(sk, req, NULL)) | ||
661 | goto drop_and_free; | ||
662 | |||
663 | inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); | ||
664 | return 0; | ||
665 | |||
666 | drop_and_free: | ||
667 | reqsk_free(req); | ||
668 | drop: | ||
669 | DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS); | ||
670 | dcb->dccpd_reset_code = reset_code; | ||
671 | return -1; | ||
672 | } | ||
673 | |||
674 | EXPORT_SYMBOL_GPL(dccp_v4_conn_request); | ||
675 | |||
762 | int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) | 676 | int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) |
763 | { | 677 | { |
764 | struct dccp_hdr *dh = dccp_hdr(skb); | 678 | struct dccp_hdr *dh = dccp_hdr(skb); |
@@ -771,24 +685,23 @@ int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
771 | 685 | ||
772 | /* | 686 | /* |
773 | * Step 3: Process LISTEN state | 687 | * Step 3: Process LISTEN state |
774 | * If S.state == LISTEN, | 688 | * If P.type == Request or P contains a valid Init Cookie option, |
775 | * If P.type == Request or P contains a valid Init Cookie | 689 | * (* Must scan the packet's options to check for Init |
776 | * option, | 690 | * Cookies. Only Init Cookies are processed here, |
777 | * * Must scan the packet's options to check for an Init | 691 | * however; other options are processed in Step 8. This |
778 | * Cookie. Only the Init Cookie is processed here, | 692 | * scan need only be performed if the endpoint uses Init |
779 | * however; other options are processed in Step 8. This | 693 | * Cookies *) |
780 | * scan need only be performed if the endpoint uses Init | 694 | * (* Generate a new socket and switch to that socket *) |
781 | * Cookies * | 695 | * Set S := new socket for this port pair |
782 | * * Generate a new socket and switch to that socket * | 696 | * S.state = RESPOND |
783 | * Set S := new socket for this port pair | 697 | * Choose S.ISS (initial seqno) or set from Init Cookies |
784 | * S.state = RESPOND | 698 | * Initialize S.GAR := S.ISS |
785 | * Choose S.ISS (initial seqno) or set from Init Cookie | 699 | * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies |
786 | * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie | 700 | * Continue with S.state == RESPOND |
787 | * Continue with S.state == RESPOND | 701 | * (* A Response packet will be generated in Step 11 *) |
788 | * * A Response packet will be generated in Step 11 * | 702 | * Otherwise, |
789 | * Otherwise, | 703 | * Generate Reset(No Connection) unless P.type == Reset |
790 | * Generate Reset(No Connection) unless P.type == Reset | 704 | * Drop packet and return |
791 | * Drop packet and return | ||
792 | * | 705 | * |
793 | * NOTE: the check for the packet types is done in | 706 | * NOTE: the check for the packet types is done in |
794 | * dccp_rcv_state_process | 707 | * dccp_rcv_state_process |
@@ -811,7 +724,7 @@ int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
811 | return 0; | 724 | return 0; |
812 | 725 | ||
813 | reset: | 726 | reset: |
814 | dccp_v4_ctl_send_reset(skb); | 727 | dccp_v4_ctl_send_reset(sk, skb); |
815 | discard: | 728 | discard: |
816 | kfree_skb(skb); | 729 | kfree_skb(skb); |
817 | return 0; | 730 | return 0; |
@@ -819,60 +732,74 @@ discard: | |||
819 | 732 | ||
820 | EXPORT_SYMBOL_GPL(dccp_v4_do_rcv); | 733 | EXPORT_SYMBOL_GPL(dccp_v4_do_rcv); |
821 | 734 | ||
735 | /** | ||
736 | * dccp_invalid_packet - check for malformed packets | ||
737 | * Implements RFC 4340, 8.5: Step 1: Check header basics | ||
738 | * Packets that fail these checks are ignored and do not receive Resets. | ||
739 | */ | ||
822 | int dccp_invalid_packet(struct sk_buff *skb) | 740 | int dccp_invalid_packet(struct sk_buff *skb) |
823 | { | 741 | { |
824 | const struct dccp_hdr *dh; | 742 | const struct dccp_hdr *dh; |
743 | unsigned int cscov; | ||
825 | 744 | ||
826 | if (skb->pkt_type != PACKET_HOST) | 745 | if (skb->pkt_type != PACKET_HOST) |
827 | return 1; | 746 | return 1; |
828 | 747 | ||
748 | /* If the packet is shorter than 12 bytes, drop packet and return */ | ||
829 | if (!pskb_may_pull(skb, sizeof(struct dccp_hdr))) { | 749 | if (!pskb_may_pull(skb, sizeof(struct dccp_hdr))) { |
830 | LIMIT_NETDEBUG(KERN_WARNING "DCCP: pskb_may_pull failed\n"); | 750 | DCCP_WARN("pskb_may_pull failed\n"); |
831 | return 1; | 751 | return 1; |
832 | } | 752 | } |
833 | 753 | ||
834 | dh = dccp_hdr(skb); | 754 | dh = dccp_hdr(skb); |
835 | 755 | ||
836 | /* If the packet type is not understood, drop packet and return */ | 756 | /* If P.type is not understood, drop packet and return */ |
837 | if (dh->dccph_type >= DCCP_PKT_INVALID) { | 757 | if (dh->dccph_type >= DCCP_PKT_INVALID) { |
838 | LIMIT_NETDEBUG(KERN_WARNING "DCCP: invalid packet type\n"); | 758 | DCCP_WARN("invalid packet type\n"); |
839 | return 1; | 759 | return 1; |
840 | } | 760 | } |
841 | 761 | ||
842 | /* | 762 | /* |
843 | * If P.Data Offset is too small for packet type, or too large for | 763 | * If P.Data Offset is too small for packet type, drop packet and return |
844 | * packet, drop packet and return | ||
845 | */ | 764 | */ |
846 | if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) { | 765 | if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) { |
847 | LIMIT_NETDEBUG(KERN_WARNING "DCCP: P.Data Offset(%u) " | 766 | DCCP_WARN("P.Data Offset(%u) too small\n", dh->dccph_doff); |
848 | "too small 1\n", | ||
849 | dh->dccph_doff); | ||
850 | return 1; | 767 | return 1; |
851 | } | 768 | } |
852 | 769 | /* | |
770 | * If P.Data Offset is too large for packet, drop packet and return | ||
771 | */ | ||
853 | if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) { | 772 | if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) { |
854 | LIMIT_NETDEBUG(KERN_WARNING "DCCP: P.Data Offset(%u) " | 773 | DCCP_WARN("P.Data Offset(%u) too large\n", dh->dccph_doff); |
855 | "too small 2\n", | ||
856 | dh->dccph_doff); | ||
857 | return 1; | 774 | return 1; |
858 | } | 775 | } |
859 | 776 | ||
860 | dh = dccp_hdr(skb); | ||
861 | |||
862 | /* | 777 | /* |
863 | * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet | 778 | * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet |
864 | * has short sequence numbers), drop packet and return | 779 | * has short sequence numbers), drop packet and return |
865 | */ | 780 | */ |
866 | if (dh->dccph_x == 0 && | 781 | if (dh->dccph_type >= DCCP_PKT_DATA && |
867 | dh->dccph_type != DCCP_PKT_DATA && | 782 | dh->dccph_type <= DCCP_PKT_DATAACK && dh->dccph_x == 0) { |
868 | dh->dccph_type != DCCP_PKT_ACK && | 783 | DCCP_WARN("P.type (%s) not Data || [Data]Ack, while P.X == 0\n", |
869 | dh->dccph_type != DCCP_PKT_DATAACK) { | 784 | dccp_packet_name(dh->dccph_type)); |
870 | LIMIT_NETDEBUG(KERN_WARNING "DCCP: P.type (%s) not Data, Ack " | ||
871 | "nor DataAck and P.X == 0\n", | ||
872 | dccp_packet_name(dh->dccph_type)); | ||
873 | return 1; | 785 | return 1; |
874 | } | 786 | } |
875 | 787 | ||
788 | /* | ||
789 | * If P.CsCov is too large for the packet size, drop packet and return. | ||
790 | * This must come _before_ checksumming (not as RFC 4340 suggests). | ||
791 | */ | ||
792 | cscov = dccp_csum_coverage(skb); | ||
793 | if (cscov > skb->len) { | ||
794 | DCCP_WARN("P.CsCov %u exceeds packet length %d\n", | ||
795 | dh->dccph_cscov, skb->len); | ||
796 | return 1; | ||
797 | } | ||
798 | |||
799 | /* If header checksum is incorrect, drop packet and return. | ||
800 | * (This step is completed in the AF-dependent functions.) */ | ||
801 | skb->csum = skb_checksum(skb, 0, cscov, 0); | ||
802 | |||
876 | return 0; | 803 | return 0; |
877 | } | 804 | } |
878 | 805 | ||
@@ -883,17 +810,16 @@ static int dccp_v4_rcv(struct sk_buff *skb) | |||
883 | { | 810 | { |
884 | const struct dccp_hdr *dh; | 811 | const struct dccp_hdr *dh; |
885 | struct sock *sk; | 812 | struct sock *sk; |
813 | int min_cov; | ||
886 | 814 | ||
887 | /* Step 1: Check header basics: */ | 815 | /* Step 1: Check header basics */ |
888 | 816 | ||
889 | if (dccp_invalid_packet(skb)) | 817 | if (dccp_invalid_packet(skb)) |
890 | goto discard_it; | 818 | goto discard_it; |
891 | 819 | ||
892 | /* If the header checksum is incorrect, drop packet and return */ | 820 | /* Step 1: If header checksum is incorrect, drop packet and return */ |
893 | if (dccp_v4_verify_checksum(skb, skb->nh.iph->saddr, | 821 | if (dccp_v4_csum_finish(skb, skb->nh.iph->saddr, skb->nh.iph->daddr)) { |
894 | skb->nh.iph->daddr) < 0) { | 822 | DCCP_WARN("dropped packet with invalid checksum\n"); |
895 | LIMIT_NETDEBUG(KERN_WARNING "%s: incorrect header checksum\n", | ||
896 | __FUNCTION__); | ||
897 | goto discard_it; | 823 | goto discard_it; |
898 | } | 824 | } |
899 | 825 | ||
@@ -915,8 +841,7 @@ static int dccp_v4_rcv(struct sk_buff *skb) | |||
915 | dccp_pr_debug_cat("\n"); | 841 | dccp_pr_debug_cat("\n"); |
916 | } else { | 842 | } else { |
917 | DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb); | 843 | DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb); |
918 | dccp_pr_debug_cat(", ack=%llu\n", | 844 | dccp_pr_debug_cat(", ack=%llu\n", (unsigned long long) |
919 | (unsigned long long) | ||
920 | DCCP_SKB_CB(skb)->dccpd_ack_seq); | 845 | DCCP_SKB_CB(skb)->dccpd_ack_seq); |
921 | } | 846 | } |
922 | 847 | ||
@@ -930,8 +855,6 @@ static int dccp_v4_rcv(struct sk_buff *skb) | |||
930 | /* | 855 | /* |
931 | * Step 2: | 856 | * Step 2: |
932 | * If no socket ... | 857 | * If no socket ... |
933 | * Generate Reset(No Connection) unless P.type == Reset | ||
934 | * Drop packet and return | ||
935 | */ | 858 | */ |
936 | if (sk == NULL) { | 859 | if (sk == NULL) { |
937 | dccp_pr_debug("failed to look up flow ID in table and " | 860 | dccp_pr_debug("failed to look up flow ID in table and " |
@@ -945,45 +868,55 @@ static int dccp_v4_rcv(struct sk_buff *skb) | |||
945 | * Generate Reset(No Connection) unless P.type == Reset | 868 | * Generate Reset(No Connection) unless P.type == Reset |
946 | * Drop packet and return | 869 | * Drop packet and return |
947 | */ | 870 | */ |
948 | |||
949 | if (sk->sk_state == DCCP_TIME_WAIT) { | 871 | if (sk->sk_state == DCCP_TIME_WAIT) { |
950 | dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: " | 872 | dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n"); |
951 | "do_time_wait\n"); | 873 | inet_twsk_put(inet_twsk(sk)); |
952 | goto do_time_wait; | 874 | goto no_dccp_socket; |
875 | } | ||
876 | |||
877 | /* | ||
878 | * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage | ||
879 | * o if MinCsCov = 0, only packets with CsCov = 0 are accepted | ||
880 | * o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov | ||
881 | */ | ||
882 | min_cov = dccp_sk(sk)->dccps_pcrlen; | ||
883 | if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) { | ||
884 | dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n", | ||
885 | dh->dccph_cscov, min_cov); | ||
886 | /* FIXME: "Such packets SHOULD be reported using Data Dropped | ||
887 | * options (Section 11.7) with Drop Code 0, Protocol | ||
888 | * Constraints." */ | ||
889 | goto discard_and_relse; | ||
953 | } | 890 | } |
954 | 891 | ||
955 | if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) | 892 | if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) |
956 | goto discard_and_relse; | 893 | goto discard_and_relse; |
957 | nf_reset(skb); | 894 | nf_reset(skb); |
958 | 895 | ||
959 | return sk_receive_skb(sk, skb); | 896 | return sk_receive_skb(sk, skb, 1); |
960 | 897 | ||
961 | no_dccp_socket: | 898 | no_dccp_socket: |
962 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) | 899 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) |
963 | goto discard_it; | 900 | goto discard_it; |
964 | /* | 901 | /* |
965 | * Step 2: | 902 | * Step 2: |
903 | * If no socket ... | ||
966 | * Generate Reset(No Connection) unless P.type == Reset | 904 | * Generate Reset(No Connection) unless P.type == Reset |
967 | * Drop packet and return | 905 | * Drop packet and return |
968 | */ | 906 | */ |
969 | if (dh->dccph_type != DCCP_PKT_RESET) { | 907 | if (dh->dccph_type != DCCP_PKT_RESET) { |
970 | DCCP_SKB_CB(skb)->dccpd_reset_code = | 908 | DCCP_SKB_CB(skb)->dccpd_reset_code = |
971 | DCCP_RESET_CODE_NO_CONNECTION; | 909 | DCCP_RESET_CODE_NO_CONNECTION; |
972 | dccp_v4_ctl_send_reset(skb); | 910 | dccp_v4_ctl_send_reset(sk, skb); |
973 | } | 911 | } |
974 | 912 | ||
975 | discard_it: | 913 | discard_it: |
976 | /* Discard frame. */ | ||
977 | kfree_skb(skb); | 914 | kfree_skb(skb); |
978 | return 0; | 915 | return 0; |
979 | 916 | ||
980 | discard_and_relse: | 917 | discard_and_relse: |
981 | sock_put(sk); | 918 | sock_put(sk); |
982 | goto discard_it; | 919 | goto discard_it; |
983 | |||
984 | do_time_wait: | ||
985 | inet_twsk_put(inet_twsk(sk)); | ||
986 | goto no_dccp_socket; | ||
987 | } | 920 | } |
988 | 921 | ||
989 | static struct inet_connection_sock_af_ops dccp_ipv4_af_ops = { | 922 | static struct inet_connection_sock_af_ops dccp_ipv4_af_ops = { |
@@ -1017,20 +950,6 @@ static int dccp_v4_init_sock(struct sock *sk) | |||
1017 | return err; | 950 | return err; |
1018 | } | 951 | } |
1019 | 952 | ||
1020 | static void dccp_v4_reqsk_destructor(struct request_sock *req) | ||
1021 | { | ||
1022 | kfree(inet_rsk(req)->opt); | ||
1023 | } | ||
1024 | |||
1025 | static struct request_sock_ops dccp_request_sock_ops = { | ||
1026 | .family = PF_INET, | ||
1027 | .obj_size = sizeof(struct dccp_request_sock), | ||
1028 | .rtx_syn_ack = dccp_v4_send_response, | ||
1029 | .send_ack = dccp_v4_reqsk_send_ack, | ||
1030 | .destructor = dccp_v4_reqsk_destructor, | ||
1031 | .send_reset = dccp_v4_ctl_send_reset, | ||
1032 | }; | ||
1033 | |||
1034 | static struct timewait_sock_ops dccp_timewait_sock_ops = { | 953 | static struct timewait_sock_ops dccp_timewait_sock_ops = { |
1035 | .twsk_obj_size = sizeof(struct inet_timewait_sock), | 954 | .twsk_obj_size = sizeof(struct inet_timewait_sock), |
1036 | }; | 955 | }; |
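The new receive-path check in dccp_v4_rcv() enforces RFC 4340, sec. 9.2.1: full-coverage packets (CsCov == 0) are always accepted, while partially covered packets are accepted only when the socket's MinCsCov is nonzero and CsCov >= MinCsCov. The same rule as a small standalone predicate:

    /* Standalone restatement of the MinCsCov acceptance rule applied in
     * dccp_v4_rcv() above (RFC 4340, sec. 9.2.1). */
    #include <stdbool.h>

    static bool cscov_acceptable(unsigned int cscov, unsigned int min_cscov)
    {
            if (cscov == 0)
                    return true;    /* full checksum coverage is always accepted */
            /* partial coverage needs MinCsCov > 0 and CsCov >= MinCsCov */
            return min_cscov > 0 && cscov >= min_cscov;
    }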
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index fc4242c0767c..c7aaa2574f52 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
@@ -36,13 +36,6 @@ | |||
36 | /* Socket used for sending RSTs and ACKs */ | 36 | /* Socket used for sending RSTs and ACKs */ |
37 | static struct socket *dccp_v6_ctl_socket; | 37 | static struct socket *dccp_v6_ctl_socket; |
38 | 38 | ||
39 | static void dccp_v6_ctl_send_reset(struct sk_buff *skb); | ||
40 | static void dccp_v6_reqsk_send_ack(struct sk_buff *skb, | ||
41 | struct request_sock *req); | ||
42 | static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb); | ||
43 | |||
44 | static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); | ||
45 | |||
46 | static struct inet_connection_sock_af_ops dccp_ipv6_mapped; | 39 | static struct inet_connection_sock_af_ops dccp_ipv6_mapped; |
47 | static struct inet_connection_sock_af_ops dccp_ipv6_af_ops; | 40 | static struct inet_connection_sock_af_ops dccp_ipv6_af_ops; |
48 | 41 | ||
@@ -65,205 +58,37 @@ static void dccp_v6_hash(struct sock *sk) | |||
65 | } | 58 | } |
66 | } | 59 | } |
67 | 60 | ||
68 | static inline u16 dccp_v6_check(struct dccp_hdr *dh, int len, | 61 | /* add pseudo-header to DCCP checksum stored in skb->csum */ |
69 | struct in6_addr *saddr, | 62 | static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb, |
70 | struct in6_addr *daddr, | 63 | struct in6_addr *saddr, |
71 | unsigned long base) | 64 | struct in6_addr *daddr) |
72 | { | 65 | { |
73 | return csum_ipv6_magic(saddr, daddr, len, IPPROTO_DCCP, base); | 66 | return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum); |
74 | } | 67 | } |
75 | 68 | ||
76 | static __u32 dccp_v6_init_sequence(struct sock *sk, struct sk_buff *skb) | 69 | static inline void dccp_v6_send_check(struct sock *sk, int unused_value, |
70 | struct sk_buff *skb) | ||
77 | { | 71 | { |
78 | const struct dccp_hdr *dh = dccp_hdr(skb); | 72 | struct ipv6_pinfo *np = inet6_sk(sk); |
79 | 73 | struct dccp_hdr *dh = dccp_hdr(skb); | |
80 | if (skb->protocol == htons(ETH_P_IPV6)) | ||
81 | return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32, | ||
82 | skb->nh.ipv6h->saddr.s6_addr32, | ||
83 | dh->dccph_dport, | ||
84 | dh->dccph_sport); | ||
85 | 74 | ||
86 | return secure_dccp_sequence_number(skb->nh.iph->daddr, | 75 | dccp_csum_outgoing(skb); |
87 | skb->nh.iph->saddr, | 76 | dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr); |
88 | dh->dccph_dport, | ||
89 | dh->dccph_sport); | ||
90 | } | 77 | } |
91 | 78 | ||
92 | static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | 79 | static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr, |
93 | int addr_len) | 80 | __be16 sport, __be16 dport ) |
94 | { | 81 | { |
95 | struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr; | 82 | return secure_tcpv6_sequence_number(saddr, daddr, sport, dport); |
96 | struct inet_connection_sock *icsk = inet_csk(sk); | 83 | } |
97 | struct inet_sock *inet = inet_sk(sk); | ||
98 | struct ipv6_pinfo *np = inet6_sk(sk); | ||
99 | struct dccp_sock *dp = dccp_sk(sk); | ||
100 | struct in6_addr *saddr = NULL, *final_p = NULL, final; | ||
101 | struct flowi fl; | ||
102 | struct dst_entry *dst; | ||
103 | int addr_type; | ||
104 | int err; | ||
105 | |||
106 | dp->dccps_role = DCCP_ROLE_CLIENT; | ||
107 | |||
108 | if (addr_len < SIN6_LEN_RFC2133) | ||
109 | return -EINVAL; | ||
110 | |||
111 | if (usin->sin6_family != AF_INET6) | ||
112 | return -EAFNOSUPPORT; | ||
113 | |||
114 | memset(&fl, 0, sizeof(fl)); | ||
115 | |||
116 | if (np->sndflow) { | ||
117 | fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK; | ||
118 | IP6_ECN_flow_init(fl.fl6_flowlabel); | ||
119 | if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) { | ||
120 | struct ip6_flowlabel *flowlabel; | ||
121 | flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel); | ||
122 | if (flowlabel == NULL) | ||
123 | return -EINVAL; | ||
124 | ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst); | ||
125 | fl6_sock_release(flowlabel); | ||
126 | } | ||
127 | } | ||
128 | /* | ||
129 | * connect() to INADDR_ANY means loopback (BSD'ism). | ||
130 | */ | ||
131 | if (ipv6_addr_any(&usin->sin6_addr)) | ||
132 | usin->sin6_addr.s6_addr[15] = 1; | ||
133 | |||
134 | addr_type = ipv6_addr_type(&usin->sin6_addr); | ||
135 | |||
136 | if (addr_type & IPV6_ADDR_MULTICAST) | ||
137 | return -ENETUNREACH; | ||
138 | |||
139 | if (addr_type & IPV6_ADDR_LINKLOCAL) { | ||
140 | if (addr_len >= sizeof(struct sockaddr_in6) && | ||
141 | usin->sin6_scope_id) { | ||
142 | /* If interface is set while binding, indices | ||
143 | * must coincide. | ||
144 | */ | ||
145 | if (sk->sk_bound_dev_if && | ||
146 | sk->sk_bound_dev_if != usin->sin6_scope_id) | ||
147 | return -EINVAL; | ||
148 | |||
149 | sk->sk_bound_dev_if = usin->sin6_scope_id; | ||
150 | } | ||
151 | |||
152 | /* Connect to link-local address requires an interface */ | ||
153 | if (!sk->sk_bound_dev_if) | ||
154 | return -EINVAL; | ||
155 | } | ||
156 | |||
157 | ipv6_addr_copy(&np->daddr, &usin->sin6_addr); | ||
158 | np->flow_label = fl.fl6_flowlabel; | ||
159 | |||
160 | /* | ||
161 | * DCCP over IPv4 | ||
162 | */ | ||
163 | if (addr_type == IPV6_ADDR_MAPPED) { | ||
164 | u32 exthdrlen = icsk->icsk_ext_hdr_len; | ||
165 | struct sockaddr_in sin; | ||
166 | |||
167 | SOCK_DEBUG(sk, "connect: ipv4 mapped\n"); | ||
168 | |||
169 | if (__ipv6_only_sock(sk)) | ||
170 | return -ENETUNREACH; | ||
171 | |||
172 | sin.sin_family = AF_INET; | ||
173 | sin.sin_port = usin->sin6_port; | ||
174 | sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3]; | ||
175 | |||
176 | icsk->icsk_af_ops = &dccp_ipv6_mapped; | ||
177 | sk->sk_backlog_rcv = dccp_v4_do_rcv; | ||
178 | |||
179 | err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin)); | ||
180 | if (err) { | ||
181 | icsk->icsk_ext_hdr_len = exthdrlen; | ||
182 | icsk->icsk_af_ops = &dccp_ipv6_af_ops; | ||
183 | sk->sk_backlog_rcv = dccp_v6_do_rcv; | ||
184 | goto failure; | ||
185 | } else { | ||
186 | ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF), | ||
187 | inet->saddr); | ||
188 | ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF), | ||
189 | inet->rcv_saddr); | ||
190 | } | ||
191 | |||
192 | return err; | ||
193 | } | ||
194 | |||
195 | if (!ipv6_addr_any(&np->rcv_saddr)) | ||
196 | saddr = &np->rcv_saddr; | ||
197 | |||
198 | fl.proto = IPPROTO_DCCP; | ||
199 | ipv6_addr_copy(&fl.fl6_dst, &np->daddr); | ||
200 | ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr); | ||
201 | fl.oif = sk->sk_bound_dev_if; | ||
202 | fl.fl_ip_dport = usin->sin6_port; | ||
203 | fl.fl_ip_sport = inet->sport; | ||
204 | security_sk_classify_flow(sk, &fl); | ||
205 | |||
206 | if (np->opt != NULL && np->opt->srcrt != NULL) { | ||
207 | const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt; | ||
208 | |||
209 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
210 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
211 | final_p = &final; | ||
212 | } | ||
213 | |||
214 | err = ip6_dst_lookup(sk, &dst, &fl); | ||
215 | if (err) | ||
216 | goto failure; | ||
217 | |||
218 | if (final_p) | ||
219 | ipv6_addr_copy(&fl.fl6_dst, final_p); | ||
220 | |||
221 | err = xfrm_lookup(&dst, &fl, sk, 0); | ||
222 | if (err < 0) | ||
223 | goto failure; | ||
224 | |||
225 | if (saddr == NULL) { | ||
226 | saddr = &fl.fl6_src; | ||
227 | ipv6_addr_copy(&np->rcv_saddr, saddr); | ||
228 | } | ||
229 | |||
230 | /* set the source address */ | ||
231 | ipv6_addr_copy(&np->saddr, saddr); | ||
232 | inet->rcv_saddr = LOOPBACK4_IPV6; | ||
233 | |||
234 | __ip6_dst_store(sk, dst, NULL, NULL); | ||
235 | |||
236 | icsk->icsk_ext_hdr_len = 0; | ||
237 | if (np->opt != NULL) | ||
238 | icsk->icsk_ext_hdr_len = (np->opt->opt_flen + | ||
239 | np->opt->opt_nflen); | ||
240 | |||
241 | inet->dport = usin->sin6_port; | ||
242 | |||
243 | dccp_set_state(sk, DCCP_REQUESTING); | ||
244 | err = inet6_hash_connect(&dccp_death_row, sk); | ||
245 | if (err) | ||
246 | goto late_failure; | ||
247 | /* FIXME */ | ||
248 | #if 0 | ||
249 | dp->dccps_gar = secure_dccp_v6_sequence_number(np->saddr.s6_addr32, | ||
250 | np->daddr.s6_addr32, | ||
251 | inet->sport, | ||
252 | inet->dport); | ||
253 | #endif | ||
254 | err = dccp_connect(sk); | ||
255 | if (err) | ||
256 | goto late_failure; | ||
257 | 84 | ||
258 | return 0; | 85 | static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb) |
86 | { | ||
87 | return secure_dccpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32, | ||
88 | skb->nh.ipv6h->saddr.s6_addr32, | ||
89 | dccp_hdr(skb)->dccph_dport, | ||
90 | dccp_hdr(skb)->dccph_sport ); | ||
259 | 91 | ||
260 | late_failure: | ||
261 | dccp_set_state(sk, DCCP_CLOSED); | ||
262 | __sk_dst_reset(sk); | ||
263 | failure: | ||
264 | inet->dport = 0; | ||
265 | sk->sk_route_caps = 0; | ||
266 | return err; | ||
267 | } | 92 | } |
268 | 93 | ||
269 | static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | 94 | static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
@@ -464,16 +289,12 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, | |||
464 | if (skb != NULL) { | 289 | if (skb != NULL) { |
465 | struct dccp_hdr *dh = dccp_hdr(skb); | 290 | struct dccp_hdr *dh = dccp_hdr(skb); |
466 | 291 | ||
467 | dh->dccph_checksum = dccp_v6_check(dh, skb->len, | 292 | dh->dccph_checksum = dccp_v6_csum_finish(skb, |
468 | &ireq6->loc_addr, | 293 | &ireq6->loc_addr, |
469 | &ireq6->rmt_addr, | 294 | &ireq6->rmt_addr); |
470 | csum_partial((char *)dh, | ||
471 | skb->len, | ||
472 | skb->csum)); | ||
473 | ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); | 295 | ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); |
474 | err = ip6_xmit(sk, skb, &fl, opt, 0); | 296 | err = ip6_xmit(sk, skb, &fl, opt, 0); |
475 | if (err == NET_XMIT_CN) | 297 | err = net_xmit_eval(err); |
476 | err = 0; | ||
477 | } | 298 | } |
478 | 299 | ||
479 | done: | 300 | done: |
@@ -489,32 +310,7 @@ static void dccp_v6_reqsk_destructor(struct request_sock *req) | |||
489 | kfree_skb(inet6_rsk(req)->pktopts); | 310 | kfree_skb(inet6_rsk(req)->pktopts); |
490 | } | 311 | } |
491 | 312 | ||
492 | static struct request_sock_ops dccp6_request_sock_ops = { | 313 | static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) |
493 | .family = AF_INET6, | ||
494 | .obj_size = sizeof(struct dccp6_request_sock), | ||
495 | .rtx_syn_ack = dccp_v6_send_response, | ||
496 | .send_ack = dccp_v6_reqsk_send_ack, | ||
497 | .destructor = dccp_v6_reqsk_destructor, | ||
498 | .send_reset = dccp_v6_ctl_send_reset, | ||
499 | }; | ||
500 | |||
501 | static struct timewait_sock_ops dccp6_timewait_sock_ops = { | ||
502 | .twsk_obj_size = sizeof(struct dccp6_timewait_sock), | ||
503 | }; | ||
504 | |||
505 | static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb) | ||
506 | { | ||
507 | struct ipv6_pinfo *np = inet6_sk(sk); | ||
508 | struct dccp_hdr *dh = dccp_hdr(skb); | ||
509 | |||
510 | dh->dccph_checksum = csum_ipv6_magic(&np->saddr, &np->daddr, | ||
511 | len, IPPROTO_DCCP, | ||
512 | csum_partial((char *)dh, | ||
513 | dh->dccph_doff << 2, | ||
514 | skb->csum)); | ||
515 | } | ||
516 | |||
517 | static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb) | ||
518 | { | 314 | { |
519 | struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh; | 315 | struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh; |
520 | const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) + | 316 | const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) + |
@@ -522,7 +318,7 @@ static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb) | |||
522 | sizeof(struct dccp_hdr_reset); | 318 | sizeof(struct dccp_hdr_reset); |
523 | struct sk_buff *skb; | 319 | struct sk_buff *skb; |
524 | struct flowi fl; | 320 | struct flowi fl; |
525 | u64 seqno; | 321 | u64 seqno = 0; |
526 | 322 | ||
527 | if (rxdh->dccph_type == DCCP_PKT_RESET) | 323 | if (rxdh->dccph_type == DCCP_PKT_RESET) |
528 | return; | 324 | return; |
@@ -537,9 +333,7 @@ static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb) | |||
537 | 333 | ||
538 | skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header); | 334 | skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header); |
539 | 335 | ||
540 | skb->h.raw = skb_push(skb, dccp_hdr_reset_len); | 336 | dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len); |
541 | dh = dccp_hdr(skb); | ||
542 | memset(dh, 0, dccp_hdr_reset_len); | ||
543 | 337 | ||
544 | /* Swap the send and the receive. */ | 338 | /* Swap the send and the receive. */ |
545 | dh->dccph_type = DCCP_PKT_RESET; | 339 | dh->dccph_type = DCCP_PKT_RESET; |
@@ -551,20 +345,20 @@ static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb) | |||
551 | DCCP_SKB_CB(rxskb)->dccpd_reset_code; | 345 | DCCP_SKB_CB(rxskb)->dccpd_reset_code; |
552 | 346 | ||
553 | /* See "8.3.1. Abnormal Termination" in RFC 4340 */ | 347 | /* See "8.3.1. Abnormal Termination" in RFC 4340 */ |
554 | seqno = 0; | ||
555 | if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) | 348 | if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) |
556 | dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1); | 349 | dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1); |
557 | 350 | ||
558 | dccp_hdr_set_seq(dh, seqno); | 351 | dccp_hdr_set_seq(dh, seqno); |
559 | dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), | 352 | dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), DCCP_SKB_CB(rxskb)->dccpd_seq); |
560 | DCCP_SKB_CB(rxskb)->dccpd_seq); | 353 | |
354 | dccp_csum_outgoing(skb); | ||
355 | dh->dccph_checksum = dccp_v6_csum_finish(skb, &rxskb->nh.ipv6h->saddr, | ||
356 | &rxskb->nh.ipv6h->daddr); | ||
561 | 357 | ||
562 | memset(&fl, 0, sizeof(fl)); | 358 | memset(&fl, 0, sizeof(fl)); |
563 | ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr); | 359 | ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr); |
564 | ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr); | 360 | ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr); |
565 | dh->dccph_checksum = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst, | 361 | |
566 | sizeof(*dh), IPPROTO_DCCP, | ||
567 | skb->csum); | ||
568 | fl.proto = IPPROTO_DCCP; | 362 | fl.proto = IPPROTO_DCCP; |
569 | fl.oif = inet6_iif(rxskb); | 363 | fl.oif = inet6_iif(rxskb); |
570 | fl.fl_ip_dport = dh->dccph_dport; | 364 | fl.fl_ip_dport = dh->dccph_dport; |
@@ -584,60 +378,14 @@ static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb) | |||
584 | kfree_skb(skb); | 378 | kfree_skb(skb); |
585 | } | 379 | } |
586 | 380 | ||
587 | static void dccp_v6_reqsk_send_ack(struct sk_buff *rxskb, | 381 | static struct request_sock_ops dccp6_request_sock_ops = { |
588 | struct request_sock *req) | 382 | .family = AF_INET6, |
589 | { | 383 | .obj_size = sizeof(struct dccp6_request_sock), |
590 | struct flowi fl; | 384 | .rtx_syn_ack = dccp_v6_send_response, |
591 | struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh; | 385 | .send_ack = dccp_reqsk_send_ack, |
592 | const u32 dccp_hdr_ack_len = sizeof(struct dccp_hdr) + | 386 | .destructor = dccp_v6_reqsk_destructor, |
593 | sizeof(struct dccp_hdr_ext) + | 387 | .send_reset = dccp_v6_ctl_send_reset, |
594 | sizeof(struct dccp_hdr_ack_bits); | 388 | }; |
595 | struct sk_buff *skb; | ||
596 | |||
597 | skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header, | ||
598 | GFP_ATOMIC); | ||
599 | if (skb == NULL) | ||
600 | return; | ||
601 | |||
602 | skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header); | ||
603 | |||
604 | skb->h.raw = skb_push(skb, dccp_hdr_ack_len); | ||
605 | dh = dccp_hdr(skb); | ||
606 | memset(dh, 0, dccp_hdr_ack_len); | ||
607 | |||
608 | /* Build DCCP header and checksum it. */ | ||
609 | dh->dccph_type = DCCP_PKT_ACK; | ||
610 | dh->dccph_sport = rxdh->dccph_dport; | ||
611 | dh->dccph_dport = rxdh->dccph_sport; | ||
612 | dh->dccph_doff = dccp_hdr_ack_len / 4; | ||
613 | dh->dccph_x = 1; | ||
614 | |||
615 | dccp_hdr_set_seq(dh, DCCP_SKB_CB(rxskb)->dccpd_ack_seq); | ||
616 | dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), | ||
617 | DCCP_SKB_CB(rxskb)->dccpd_seq); | ||
618 | |||
619 | memset(&fl, 0, sizeof(fl)); | ||
620 | ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr); | ||
621 | ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr); | ||
622 | |||
623 | /* FIXME: calculate checksum, IPv4 also should... */ | ||
624 | |||
625 | fl.proto = IPPROTO_DCCP; | ||
626 | fl.oif = inet6_iif(rxskb); | ||
627 | fl.fl_ip_dport = dh->dccph_dport; | ||
628 | fl.fl_ip_sport = dh->dccph_sport; | ||
629 | security_req_classify_flow(req, &fl); | ||
630 | |||
631 | if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) { | ||
632 | if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) { | ||
633 | ip6_xmit(dccp_v6_ctl_socket->sk, skb, &fl, NULL, 0); | ||
634 | DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); | ||
635 | return; | ||
636 | } | ||
637 | } | ||
638 | |||
639 | kfree_skb(skb); | ||
640 | } | ||
641 | 389 | ||
642 | static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) | 390 | static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) |
643 | { | 391 | { |
@@ -672,7 +420,6 @@ static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) | |||
672 | 420 | ||
673 | static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | 421 | static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) |
674 | { | 422 | { |
675 | struct dccp_sock dp; | ||
676 | struct request_sock *req; | 423 | struct request_sock *req; |
677 | struct dccp_request_sock *dreq; | 424 | struct dccp_request_sock *dreq; |
678 | struct inet6_request_sock *ireq6; | 425 | struct inet6_request_sock *ireq6; |
@@ -704,9 +451,10 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
704 | if (req == NULL) | 451 | if (req == NULL) |
705 | goto drop; | 452 | goto drop; |
706 | 453 | ||
707 | /* FIXME: process options */ | 454 | if (dccp_parse_options(sk, skb)) |
455 | goto drop_and_free; | ||
708 | 456 | ||
709 | dccp_openreq_init(req, &dp, skb); | 457 | dccp_reqsk_init(req, skb); |
710 | 458 | ||
711 | if (security_inet_conn_request(sk, skb, req)) | 459 | if (security_inet_conn_request(sk, skb, req)) |
712 | goto drop_and_free; | 460 | goto drop_and_free; |
@@ -714,7 +462,6 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
714 | ireq6 = inet6_rsk(req); | 462 | ireq6 = inet6_rsk(req); |
715 | ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr); | 463 | ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr); |
716 | ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr); | 464 | ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr); |
717 | req->rcv_wnd = dccp_feat_default_sequence_window; | ||
718 | ireq6->pktopts = NULL; | 465 | ireq6->pktopts = NULL; |
719 | 466 | ||
720 | if (ipv6_opt_accepted(sk, skb) || | 467 | if (ipv6_opt_accepted(sk, skb) || |
@@ -733,14 +480,14 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
733 | /* | 480 | /* |
734 | * Step 3: Process LISTEN state | 481 | * Step 3: Process LISTEN state |
735 | * | 482 | * |
736 | * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie | 483 | * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie |
737 | * | 484 | * |
738 | * In fact we defer setting S.GSR, S.SWL, S.SWH to | 485 | * In fact we defer setting S.GSR, S.SWL, S.SWH to |
739 | * dccp_create_openreq_child. | 486 | * dccp_create_openreq_child. |
740 | */ | 487 | */ |
741 | dreq = dccp_rsk(req); | 488 | dreq = dccp_rsk(req); |
742 | dreq->dreq_isr = dcb->dccpd_seq; | 489 | dreq->dreq_isr = dcb->dccpd_seq; |
743 | dreq->dreq_iss = dccp_v6_init_sequence(sk, skb); | 490 | dreq->dreq_iss = dccp_v6_init_sequence(skb); |
744 | dreq->dreq_service = service; | 491 | dreq->dreq_service = service; |
745 | 492 | ||
746 | if (dccp_v6_send_response(sk, req, NULL)) | 493 | if (dccp_v6_send_response(sk, req, NULL)) |
@@ -990,18 +737,46 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
990 | --ANK (980728) | 737 | --ANK (980728) |
991 | */ | 738 | */ |
992 | if (np->rxopt.all) | 739 | if (np->rxopt.all) |
740 | /* | ||
741 | * FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below | ||
742 | * (wrt ipv6_pktoptions) and net/ipv6/tcp_ipv6.c for an example. | ||
743 | */ | ||
993 | opt_skb = skb_clone(skb, GFP_ATOMIC); | 744 | opt_skb = skb_clone(skb, GFP_ATOMIC); |
994 | 745 | ||
995 | if (sk->sk_state == DCCP_OPEN) { /* Fast path */ | 746 | if (sk->sk_state == DCCP_OPEN) { /* Fast path */ |
996 | if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len)) | 747 | if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len)) |
997 | goto reset; | 748 | goto reset; |
998 | if (opt_skb) { | 749 | if (opt_skb) { |
999 | /* This is where we would goto ipv6_pktoptions. */ | 750 | /* XXX This is where we would goto ipv6_pktoptions. */ |
1000 | __kfree_skb(opt_skb); | 751 | __kfree_skb(opt_skb); |
1001 | } | 752 | } |
1002 | return 0; | 753 | return 0; |
1003 | } | 754 | } |
1004 | 755 | ||
756 | /* | ||
757 | * Step 3: Process LISTEN state | ||
758 | * If S.state == LISTEN, | ||
759 | * If P.type == Request or P contains a valid Init Cookie option, | ||
760 | * (* Must scan the packet's options to check for Init | ||
761 | * Cookies. Only Init Cookies are processed here, | ||
762 | * however; other options are processed in Step 8. This | ||
763 | * scan need only be performed if the endpoint uses Init | ||
764 | * Cookies *) | ||
765 | * (* Generate a new socket and switch to that socket *) | ||
766 | * Set S := new socket for this port pair | ||
767 | * S.state = RESPOND | ||
768 | * Choose S.ISS (initial seqno) or set from Init Cookies | ||
769 | * Initialize S.GAR := S.ISS | ||
770 | * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies | ||
771 | * Continue with S.state == RESPOND | ||
772 | * (* A Response packet will be generated in Step 11 *) | ||
773 | * Otherwise, | ||
774 | * Generate Reset(No Connection) unless P.type == Reset | ||
775 | * Drop packet and return | ||
776 | * | ||
777 | * NOTE: the check for the packet types is done in | ||
778 | * dccp_rcv_state_process | ||
779 | */ | ||
1005 | if (sk->sk_state == DCCP_LISTEN) { | 780 | if (sk->sk_state == DCCP_LISTEN) { |
1006 | struct sock *nsk = dccp_v6_hnd_req(sk, skb); | 781 | struct sock *nsk = dccp_v6_hnd_req(sk, skb); |
1007 | 782 | ||
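The long RFC 4340 excerpt inserted above spells out the Step 3 decision a listening socket has to make. As a rough, self-contained model of that rule (not kernel code; the enum names and types are invented for illustration):

#include <stdbool.h>

enum pkt_type { PKT_REQUEST, PKT_RESET, PKT_OTHER };
enum listen_action { CREATE_RESPOND_SOCKET, SEND_RESET_NO_CONNECTION, DROP_PACKET };

/* Step 3 for S.state == LISTEN: only a Request (or a packet carrying a
 * valid Init Cookie) may create a new socket in RESPOND state; everything
 * else gets Reset(No Connection), except a Reset, which is simply dropped.
 */
static enum listen_action listen_dispatch(enum pkt_type type, bool valid_init_cookie)
{
	if (type == PKT_REQUEST || valid_init_cookie)
		return CREATE_RESPOND_SOCKET;
	return type == PKT_RESET ? DROP_PACKET : SEND_RESET_NO_CONNECTION;
}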
@@ -1024,13 +799,13 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
1024 | if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len)) | 799 | if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len)) |
1025 | goto reset; | 800 | goto reset; |
1026 | if (opt_skb) { | 801 | if (opt_skb) { |
1027 | /* This is where we would goto ipv6_pktoptions. */ | 802 | /* XXX This is where we would goto ipv6_pktoptions. */ |
1028 | __kfree_skb(opt_skb); | 803 | __kfree_skb(opt_skb); |
1029 | } | 804 | } |
1030 | return 0; | 805 | return 0; |
1031 | 806 | ||
1032 | reset: | 807 | reset: |
1033 | dccp_v6_ctl_send_reset(skb); | 808 | dccp_v6_ctl_send_reset(sk, skb); |
1034 | discard: | 809 | discard: |
1035 | if (opt_skb != NULL) | 810 | if (opt_skb != NULL) |
1036 | __kfree_skb(opt_skb); | 811 | __kfree_skb(opt_skb); |
@@ -1043,12 +818,20 @@ static int dccp_v6_rcv(struct sk_buff **pskb) | |||
1043 | const struct dccp_hdr *dh; | 818 | const struct dccp_hdr *dh; |
1044 | struct sk_buff *skb = *pskb; | 819 | struct sk_buff *skb = *pskb; |
1045 | struct sock *sk; | 820 | struct sock *sk; |
821 | int min_cov; | ||
1046 | 822 | ||
1047 | /* Step 1: Check header basics: */ | 823 | /* Step 1: Check header basics */ |
1048 | 824 | ||
1049 | if (dccp_invalid_packet(skb)) | 825 | if (dccp_invalid_packet(skb)) |
1050 | goto discard_it; | 826 | goto discard_it; |
1051 | 827 | ||
828 | /* Step 1: If header checksum is incorrect, drop packet and return. */ | ||
829 | if (dccp_v6_csum_finish(skb, &skb->nh.ipv6h->saddr, | ||
830 | &skb->nh.ipv6h->daddr)) { | ||
831 | DCCP_WARN("dropped packet with invalid checksum\n"); | ||
832 | goto discard_it; | ||
833 | } | ||
834 | |||
1052 | dh = dccp_hdr(skb); | 835 | dh = dccp_hdr(skb); |
1053 | 836 | ||
1054 | DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(skb); | 837 | DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(skb); |
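The new Step 1 check above drops any packet whose header checksum, computed over the IPv6 pseudo-header plus the covered part of the packet, fails to verify. A hedged standalone sketch of the 16-bit one's-complement arithmetic that csum_partial()/csum_ipv6_magic() ultimately boil down to (illustrative helpers, not the kernel implementation):

#include <stdint.h>
#include <stddef.h>

/* Accumulate 16-bit big-endian words into a 32-bit partial sum. */
static uint32_t csum_accumulate(const uint8_t *p, size_t len, uint32_t sum)
{
	while (len > 1) {
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)				/* odd trailing byte */
		sum += (uint32_t)p[0] << 8;
	return sum;
}

/* Fold carries and complement; a received packet checks out iff the sum
 * over pseudo-header + covered data (stored checksum included) folds to 0.
 */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}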
@@ -1068,11 +851,12 @@ static int dccp_v6_rcv(struct sk_buff **pskb) | |||
1068 | /* | 851 | /* |
1069 | * Step 2: | 852 | * Step 2: |
1070 | * If no socket ... | 853 | * If no socket ... |
1071 | * Generate Reset(No Connection) unless P.type == Reset | ||
1072 | * Drop packet and return | ||
1073 | */ | 854 | */ |
1074 | if (sk == NULL) | 855 | if (sk == NULL) { |
856 | dccp_pr_debug("failed to look up flow ID in table and " | ||
857 | "get corresponding socket\n"); | ||
1075 | goto no_dccp_socket; | 858 | goto no_dccp_socket; |
859 | } | ||
1076 | 860 | ||
1077 | /* | 861 | /* |
1078 | * Step 2: | 862 | * Step 2: |
@@ -1080,43 +864,226 @@ static int dccp_v6_rcv(struct sk_buff **pskb) | |||
1080 | * Generate Reset(No Connection) unless P.type == Reset | 864 | * Generate Reset(No Connection) unless P.type == Reset |
1081 | * Drop packet and return | 865 | * Drop packet and return |
1082 | */ | 866 | */ |
1083 | if (sk->sk_state == DCCP_TIME_WAIT) | 867 | if (sk->sk_state == DCCP_TIME_WAIT) { |
1084 | goto do_time_wait; | 868 | dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n"); |
869 | inet_twsk_put(inet_twsk(sk)); | ||
870 | goto no_dccp_socket; | ||
871 | } | ||
872 | |||
873 | /* | ||
874 | * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage | ||
875 | * o if MinCsCov = 0, only packets with CsCov = 0 are accepted | ||
876 | * o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov | ||
877 | */ | ||
878 | min_cov = dccp_sk(sk)->dccps_pcrlen; | ||
879 | if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) { | ||
880 | dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n", | ||
881 | dh->dccph_cscov, min_cov); | ||
882 | /* FIXME: send Data Dropped option (see also dccp_v4_rcv) */ | ||
883 | goto discard_and_relse; | ||
884 | } | ||
1085 | 885 | ||
1086 | if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) | 886 | if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) |
1087 | goto discard_and_relse; | 887 | goto discard_and_relse; |
1088 | 888 | ||
1089 | return sk_receive_skb(sk, skb) ? -1 : 0; | 889 | return sk_receive_skb(sk, skb, 1) ? -1 : 0; |
1090 | 890 | ||
1091 | no_dccp_socket: | 891 | no_dccp_socket: |
1092 | if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) | 892 | if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) |
1093 | goto discard_it; | 893 | goto discard_it; |
1094 | /* | 894 | /* |
1095 | * Step 2: | 895 | * Step 2: |
896 | * If no socket ... | ||
1096 | * Generate Reset(No Connection) unless P.type == Reset | 897 | * Generate Reset(No Connection) unless P.type == Reset |
1097 | * Drop packet and return | 898 | * Drop packet and return |
1098 | */ | 899 | */ |
1099 | if (dh->dccph_type != DCCP_PKT_RESET) { | 900 | if (dh->dccph_type != DCCP_PKT_RESET) { |
1100 | DCCP_SKB_CB(skb)->dccpd_reset_code = | 901 | DCCP_SKB_CB(skb)->dccpd_reset_code = |
1101 | DCCP_RESET_CODE_NO_CONNECTION; | 902 | DCCP_RESET_CODE_NO_CONNECTION; |
1102 | dccp_v6_ctl_send_reset(skb); | 903 | dccp_v6_ctl_send_reset(sk, skb); |
1103 | } | 904 | } |
1104 | discard_it: | ||
1105 | |||
1106 | /* | ||
1107 | * Discard frame | ||
1108 | */ | ||
1109 | 905 | ||
906 | discard_it: | ||
1110 | kfree_skb(skb); | 907 | kfree_skb(skb); |
1111 | return 0; | 908 | return 0; |
1112 | 909 | ||
1113 | discard_and_relse: | 910 | discard_and_relse: |
1114 | sock_put(sk); | 911 | sock_put(sk); |
1115 | goto discard_it; | 912 | goto discard_it; |
913 | } | ||
1116 | 914 | ||
1117 | do_time_wait: | 915 | static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, |
1118 | inet_twsk_put(inet_twsk(sk)); | 916 | int addr_len) |
1119 | goto no_dccp_socket; | 917 | { |
918 | struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr; | ||
919 | struct inet_connection_sock *icsk = inet_csk(sk); | ||
920 | struct inet_sock *inet = inet_sk(sk); | ||
921 | struct ipv6_pinfo *np = inet6_sk(sk); | ||
922 | struct dccp_sock *dp = dccp_sk(sk); | ||
923 | struct in6_addr *saddr = NULL, *final_p = NULL, final; | ||
924 | struct flowi fl; | ||
925 | struct dst_entry *dst; | ||
926 | int addr_type; | ||
927 | int err; | ||
928 | |||
929 | dp->dccps_role = DCCP_ROLE_CLIENT; | ||
930 | |||
931 | if (addr_len < SIN6_LEN_RFC2133) | ||
932 | return -EINVAL; | ||
933 | |||
934 | if (usin->sin6_family != AF_INET6) | ||
935 | return -EAFNOSUPPORT; | ||
936 | |||
937 | memset(&fl, 0, sizeof(fl)); | ||
938 | |||
939 | if (np->sndflow) { | ||
940 | fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK; | ||
941 | IP6_ECN_flow_init(fl.fl6_flowlabel); | ||
942 | if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) { | ||
943 | struct ip6_flowlabel *flowlabel; | ||
944 | flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel); | ||
945 | if (flowlabel == NULL) | ||
946 | return -EINVAL; | ||
947 | ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst); | ||
948 | fl6_sock_release(flowlabel); | ||
949 | } | ||
950 | } | ||
951 | /* | ||
952 | * connect() to INADDR_ANY means loopback (BSD'ism). | ||
953 | */ | ||
954 | if (ipv6_addr_any(&usin->sin6_addr)) | ||
955 | usin->sin6_addr.s6_addr[15] = 1; | ||
956 | |||
957 | addr_type = ipv6_addr_type(&usin->sin6_addr); | ||
958 | |||
959 | if (addr_type & IPV6_ADDR_MULTICAST) | ||
960 | return -ENETUNREACH; | ||
961 | |||
962 | if (addr_type & IPV6_ADDR_LINKLOCAL) { | ||
963 | if (addr_len >= sizeof(struct sockaddr_in6) && | ||
964 | usin->sin6_scope_id) { | ||
965 | /* If interface is set while binding, indices | ||
966 | * must coincide. | ||
967 | */ | ||
968 | if (sk->sk_bound_dev_if && | ||
969 | sk->sk_bound_dev_if != usin->sin6_scope_id) | ||
970 | return -EINVAL; | ||
971 | |||
972 | sk->sk_bound_dev_if = usin->sin6_scope_id; | ||
973 | } | ||
974 | |||
975 | /* Connect to link-local address requires an interface */ | ||
976 | if (!sk->sk_bound_dev_if) | ||
977 | return -EINVAL; | ||
978 | } | ||
979 | |||
980 | ipv6_addr_copy(&np->daddr, &usin->sin6_addr); | ||
981 | np->flow_label = fl.fl6_flowlabel; | ||
982 | |||
983 | /* | ||
984 | * DCCP over IPv4 | ||
985 | */ | ||
986 | if (addr_type == IPV6_ADDR_MAPPED) { | ||
987 | u32 exthdrlen = icsk->icsk_ext_hdr_len; | ||
988 | struct sockaddr_in sin; | ||
989 | |||
990 | SOCK_DEBUG(sk, "connect: ipv4 mapped\n"); | ||
991 | |||
992 | if (__ipv6_only_sock(sk)) | ||
993 | return -ENETUNREACH; | ||
994 | |||
995 | sin.sin_family = AF_INET; | ||
996 | sin.sin_port = usin->sin6_port; | ||
997 | sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3]; | ||
998 | |||
999 | icsk->icsk_af_ops = &dccp_ipv6_mapped; | ||
1000 | sk->sk_backlog_rcv = dccp_v4_do_rcv; | ||
1001 | |||
1002 | err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin)); | ||
1003 | if (err) { | ||
1004 | icsk->icsk_ext_hdr_len = exthdrlen; | ||
1005 | icsk->icsk_af_ops = &dccp_ipv6_af_ops; | ||
1006 | sk->sk_backlog_rcv = dccp_v6_do_rcv; | ||
1007 | goto failure; | ||
1008 | } else { | ||
1009 | ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF), | ||
1010 | inet->saddr); | ||
1011 | ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF), | ||
1012 | inet->rcv_saddr); | ||
1013 | } | ||
1014 | |||
1015 | return err; | ||
1016 | } | ||
1017 | |||
1018 | if (!ipv6_addr_any(&np->rcv_saddr)) | ||
1019 | saddr = &np->rcv_saddr; | ||
1020 | |||
1021 | fl.proto = IPPROTO_DCCP; | ||
1022 | ipv6_addr_copy(&fl.fl6_dst, &np->daddr); | ||
1023 | ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr); | ||
1024 | fl.oif = sk->sk_bound_dev_if; | ||
1025 | fl.fl_ip_dport = usin->sin6_port; | ||
1026 | fl.fl_ip_sport = inet->sport; | ||
1027 | security_sk_classify_flow(sk, &fl); | ||
1028 | |||
1029 | if (np->opt != NULL && np->opt->srcrt != NULL) { | ||
1030 | const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt; | ||
1031 | |||
1032 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
1033 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
1034 | final_p = &final; | ||
1035 | } | ||
1036 | |||
1037 | err = ip6_dst_lookup(sk, &dst, &fl); | ||
1038 | if (err) | ||
1039 | goto failure; | ||
1040 | |||
1041 | if (final_p) | ||
1042 | ipv6_addr_copy(&fl.fl6_dst, final_p); | ||
1043 | |||
1044 | err = xfrm_lookup(&dst, &fl, sk, 0); | ||
1045 | if (err < 0) | ||
1046 | goto failure; | ||
1047 | |||
1048 | if (saddr == NULL) { | ||
1049 | saddr = &fl.fl6_src; | ||
1050 | ipv6_addr_copy(&np->rcv_saddr, saddr); | ||
1051 | } | ||
1052 | |||
1053 | /* set the source address */ | ||
1054 | ipv6_addr_copy(&np->saddr, saddr); | ||
1055 | inet->rcv_saddr = LOOPBACK4_IPV6; | ||
1056 | |||
1057 | __ip6_dst_store(sk, dst, NULL, NULL); | ||
1058 | |||
1059 | icsk->icsk_ext_hdr_len = 0; | ||
1060 | if (np->opt != NULL) | ||
1061 | icsk->icsk_ext_hdr_len = (np->opt->opt_flen + | ||
1062 | np->opt->opt_nflen); | ||
1063 | |||
1064 | inet->dport = usin->sin6_port; | ||
1065 | |||
1066 | dccp_set_state(sk, DCCP_REQUESTING); | ||
1067 | err = inet6_hash_connect(&dccp_death_row, sk); | ||
1068 | if (err) | ||
1069 | goto late_failure; | ||
1070 | |||
1071 | dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32, | ||
1072 | np->daddr.s6_addr32, | ||
1073 | inet->sport, inet->dport); | ||
1074 | err = dccp_connect(sk); | ||
1075 | if (err) | ||
1076 | goto late_failure; | ||
1077 | |||
1078 | return 0; | ||
1079 | |||
1080 | late_failure: | ||
1081 | dccp_set_state(sk, DCCP_CLOSED); | ||
1082 | __sk_dst_reset(sk); | ||
1083 | failure: | ||
1084 | inet->dport = 0; | ||
1085 | sk->sk_route_caps = 0; | ||
1086 | return err; | ||
1120 | } | 1087 | } |
1121 | 1088 | ||
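dccp_v6_connect() above gives AF_INET6 DCCP sockets a native connect path, falling back to the IPv4 code for v4-mapped addresses. A hedged user-space sketch of the client this path serves; the service code, port and address are invented, and the numeric fallback defines merely mirror the linux/dccp.h constants of this period:

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#ifndef SOCK_DCCP
#define SOCK_DCCP 6
#endif
#ifndef IPPROTO_DCCP
#define IPPROTO_DCCP 33
#endif
#ifndef SOL_DCCP
#define SOL_DCCP 269
#endif
#ifndef DCCP_SOCKOPT_SERVICE
#define DCCP_SOCKOPT_SERVICE 2
#endif

int main(void)
{
	struct sockaddr_in6 dst;
	uint32_t service = htonl(42);		/* example service code */
	int fd = socket(AF_INET6, SOCK_DCCP, IPPROTO_DCCP);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* a service code must be attached before connect() */
	if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
		       &service, sizeof(service)) < 0) {
		perror("setsockopt");
		return 1;
	}
	memset(&dst, 0, sizeof(dst));
	dst.sin6_family = AF_INET6;
	dst.sin6_port = htons(5001);		/* example port */
	inet_pton(AF_INET6, "::1", &dst.sin6_addr);

	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
		perror("connect");
		return 1;
	}
	return 0;
}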
1122 | static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = { | 1089 | static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = { |
@@ -1179,6 +1146,10 @@ static int dccp_v6_destroy_sock(struct sock *sk) | |||
1179 | return inet6_destroy_sock(sk); | 1146 | return inet6_destroy_sock(sk); |
1180 | } | 1147 | } |
1181 | 1148 | ||
1149 | static struct timewait_sock_ops dccp6_timewait_sock_ops = { | ||
1150 | .twsk_obj_size = sizeof(struct dccp6_timewait_sock), | ||
1151 | }; | ||
1152 | |||
1182 | static struct proto dccp_v6_prot = { | 1153 | static struct proto dccp_v6_prot = { |
1183 | .name = "DCCPv6", | 1154 | .name = "DCCPv6", |
1184 | .owner = THIS_MODULE, | 1155 | .owner = THIS_MODULE, |
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index 36db5be2a9e9..4c9e26775f72 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c | |||
@@ -11,6 +11,7 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/dccp.h> | 13 | #include <linux/dccp.h> |
14 | #include <linux/kernel.h> | ||
14 | #include <linux/skbuff.h> | 15 | #include <linux/skbuff.h> |
15 | #include <linux/timer.h> | 16 | #include <linux/timer.h> |
16 | 17 | ||
@@ -82,8 +83,7 @@ void dccp_time_wait(struct sock *sk, int state, int timeo) | |||
82 | * socket up. We've got bigger problems than | 83 | * socket up. We've got bigger problems than |
83 | * non-graceful socket closings. | 84 | * non-graceful socket closings. |
84 | */ | 85 | */ |
85 | LIMIT_NETDEBUG(KERN_INFO "DCCP: time wait bucket " | 86 | DCCP_WARN("time wait bucket table overflow\n"); |
86 | "table overflow\n"); | ||
87 | } | 87 | } |
88 | 88 | ||
89 | dccp_done(sk); | 89 | dccp_done(sk); |
@@ -96,8 +96,8 @@ struct sock *dccp_create_openreq_child(struct sock *sk, | |||
96 | /* | 96 | /* |
97 | * Step 3: Process LISTEN state | 97 | * Step 3: Process LISTEN state |
98 | * | 98 | * |
99 | * // Generate a new socket and switch to that socket | 99 | * (* Generate a new socket and switch to that socket *) |
100 | * Set S := new socket for this port pair | 100 | * Set S := new socket for this port pair |
101 | */ | 101 | */ |
102 | struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC); | 102 | struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC); |
103 | 103 | ||
@@ -146,9 +146,9 @@ out_free: | |||
146 | /* | 146 | /* |
147 | * Step 3: Process LISTEN state | 147 | * Step 3: Process LISTEN state |
148 | * | 148 | * |
149 | * Choose S.ISS (initial seqno) or set from Init Cookie | 149 | * Choose S.ISS (initial seqno) or set from Init Cookies |
150 | * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init | 150 | * Initialize S.GAR := S.ISS |
151 | * Cookie | 151 | * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies |
152 | */ | 152 | */ |
153 | 153 | ||
154 | /* See dccp_v4_conn_request */ | 154 | /* See dccp_v4_conn_request */ |
@@ -194,15 +194,17 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, | |||
194 | 194 | ||
195 | /* Check for retransmitted REQUEST */ | 195 | /* Check for retransmitted REQUEST */ |
196 | if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) { | 196 | if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) { |
197 | if (after48(DCCP_SKB_CB(skb)->dccpd_seq, | 197 | struct dccp_request_sock *dreq = dccp_rsk(req); |
198 | dccp_rsk(req)->dreq_isr)) { | ||
199 | struct dccp_request_sock *dreq = dccp_rsk(req); | ||
200 | 198 | ||
199 | if (after48(DCCP_SKB_CB(skb)->dccpd_seq, dreq->dreq_isr)) { | ||
201 | dccp_pr_debug("Retransmitted REQUEST\n"); | 200 | dccp_pr_debug("Retransmitted REQUEST\n"); |
202 | /* Send another RESPONSE packet */ | 201 | dreq->dreq_isr = DCCP_SKB_CB(skb)->dccpd_seq; |
203 | dccp_set_seqno(&dreq->dreq_iss, dreq->dreq_iss + 1); | 202 | /* |
204 | dccp_set_seqno(&dreq->dreq_isr, | 203 | * Send another RESPONSE packet |
205 | DCCP_SKB_CB(skb)->dccpd_seq); | 204 | * To protect against Request floods, increment retrans |
205 | * counter (backoff, monitored by dccp_response_timer). | ||
206 | */ | ||
207 | req->retrans++; | ||
206 | req->rsk_ops->rtx_syn_ack(sk, req, NULL); | 208 | req->rsk_ops->rtx_syn_ack(sk, req, NULL); |
207 | } | 209 | } |
208 | /* Network Duplicate, discard packet */ | 210 | /* Network Duplicate, discard packet */ |
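The rewritten branch above explains why a retransmitted Request now bumps req->retrans: the response timer uses that counter to back off, so a flood of Requests no longer draws one Response per packet. A minimal model of the bookkeeping (invented struct, not kernel code):

#include <stdint.h>

struct pending_request {
	uint64_t isr;		/* highest Request seqno seen so far */
	unsigned int retrans;	/* drives backoff of further Responses */
};

static void handle_retransmitted_request(struct pending_request *req,
					 uint64_t rx_seqno)
{
	req->isr = rx_seqno;	/* remember the newer Request */
	req->retrans++;		/* the response timer backs off on this */
	/* ...followed by re-sending the Response (rtx_syn_ack) */
}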
@@ -242,7 +244,7 @@ listen_overflow: | |||
242 | DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY; | 244 | DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY; |
243 | drop: | 245 | drop: |
244 | if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET) | 246 | if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET) |
245 | req->rsk_ops->send_reset(skb); | 247 | req->rsk_ops->send_reset(sk, skb); |
246 | 248 | ||
247 | inet_csk_reqsk_queue_drop(sk, req, prev); | 249 | inet_csk_reqsk_queue_drop(sk, req, prev); |
248 | goto out; | 250 | goto out; |
@@ -282,3 +284,19 @@ int dccp_child_process(struct sock *parent, struct sock *child, | |||
282 | } | 284 | } |
283 | 285 | ||
284 | EXPORT_SYMBOL_GPL(dccp_child_process); | 286 | EXPORT_SYMBOL_GPL(dccp_child_process); |
287 | |||
288 | void dccp_reqsk_send_ack(struct sk_buff *skb, struct request_sock *rsk) | ||
289 | { | ||
290 | DCCP_BUG("DCCP-ACK packets are never sent in LISTEN/RESPOND state"); | ||
291 | } | ||
292 | |||
293 | EXPORT_SYMBOL_GPL(dccp_reqsk_send_ack); | ||
294 | |||
295 | void dccp_reqsk_init(struct request_sock *req, struct sk_buff *skb) | ||
296 | { | ||
297 | inet_rsk(req)->rmt_port = dccp_hdr(skb)->dccph_sport; | ||
298 | inet_rsk(req)->acked = 0; | ||
299 | req->rcv_wnd = sysctl_dccp_feat_sequence_window; | ||
300 | } | ||
301 | |||
302 | EXPORT_SYMBOL_GPL(dccp_reqsk_init); | ||
diff --git a/net/dccp/options.c b/net/dccp/options.c index fb0db1f7cd7b..f398b43bc055 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c | |||
@@ -22,23 +22,23 @@ | |||
22 | #include "dccp.h" | 22 | #include "dccp.h" |
23 | #include "feat.h" | 23 | #include "feat.h" |
24 | 24 | ||
25 | int dccp_feat_default_sequence_window = DCCPF_INITIAL_SEQUENCE_WINDOW; | 25 | int sysctl_dccp_feat_sequence_window = DCCPF_INITIAL_SEQUENCE_WINDOW; |
26 | int dccp_feat_default_rx_ccid = DCCPF_INITIAL_CCID; | 26 | int sysctl_dccp_feat_rx_ccid = DCCPF_INITIAL_CCID; |
27 | int dccp_feat_default_tx_ccid = DCCPF_INITIAL_CCID; | 27 | int sysctl_dccp_feat_tx_ccid = DCCPF_INITIAL_CCID; |
28 | int dccp_feat_default_ack_ratio = DCCPF_INITIAL_ACK_RATIO; | 28 | int sysctl_dccp_feat_ack_ratio = DCCPF_INITIAL_ACK_RATIO; |
29 | int dccp_feat_default_send_ack_vector = DCCPF_INITIAL_SEND_ACK_VECTOR; | 29 | int sysctl_dccp_feat_send_ack_vector = DCCPF_INITIAL_SEND_ACK_VECTOR; |
30 | int dccp_feat_default_send_ndp_count = DCCPF_INITIAL_SEND_NDP_COUNT; | 30 | int sysctl_dccp_feat_send_ndp_count = DCCPF_INITIAL_SEND_NDP_COUNT; |
31 | 31 | ||
32 | EXPORT_SYMBOL_GPL(dccp_feat_default_sequence_window); | 32 | EXPORT_SYMBOL_GPL(sysctl_dccp_feat_sequence_window); |
33 | 33 | ||
34 | void dccp_minisock_init(struct dccp_minisock *dmsk) | 34 | void dccp_minisock_init(struct dccp_minisock *dmsk) |
35 | { | 35 | { |
36 | dmsk->dccpms_sequence_window = dccp_feat_default_sequence_window; | 36 | dmsk->dccpms_sequence_window = sysctl_dccp_feat_sequence_window; |
37 | dmsk->dccpms_rx_ccid = dccp_feat_default_rx_ccid; | 37 | dmsk->dccpms_rx_ccid = sysctl_dccp_feat_rx_ccid; |
38 | dmsk->dccpms_tx_ccid = dccp_feat_default_tx_ccid; | 38 | dmsk->dccpms_tx_ccid = sysctl_dccp_feat_tx_ccid; |
39 | dmsk->dccpms_ack_ratio = dccp_feat_default_ack_ratio; | 39 | dmsk->dccpms_ack_ratio = sysctl_dccp_feat_ack_ratio; |
40 | dmsk->dccpms_send_ack_vector = dccp_feat_default_send_ack_vector; | 40 | dmsk->dccpms_send_ack_vector = sysctl_dccp_feat_send_ack_vector; |
41 | dmsk->dccpms_send_ndp_count = dccp_feat_default_send_ndp_count; | 41 | dmsk->dccpms_send_ndp_count = sysctl_dccp_feat_send_ndp_count; |
42 | } | 42 | } |
43 | 43 | ||
44 | static u32 dccp_decode_value_var(const unsigned char *bf, const u8 len) | 44 | static u32 dccp_decode_value_var(const unsigned char *bf, const u8 len) |
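Several option branches below feed their raw bytes through dccp_decode_value_var(), whose context line appears above. A hedged standalone equivalent, assuming at most four network-order bytes:

#include <stdint.h>

static uint32_t decode_value_var(const unsigned char *bf, uint8_t len)
{
	uint32_t value = 0;

	while (len-- > 0) {		/* most significant byte first */
		value <<= 8;
		value |= *bf++;
	}
	return value;
}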
@@ -60,12 +60,9 @@ static u32 dccp_decode_value_var(const unsigned char *bf, const u8 len) | |||
60 | int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | 60 | int dccp_parse_options(struct sock *sk, struct sk_buff *skb) |
61 | { | 61 | { |
62 | struct dccp_sock *dp = dccp_sk(sk); | 62 | struct dccp_sock *dp = dccp_sk(sk); |
63 | #ifdef CONFIG_IP_DCCP_DEBUG | ||
64 | const char *debug_prefix = dp->dccps_role == DCCP_ROLE_CLIENT ? | ||
65 | "CLIENT rx opt: " : "server rx opt: "; | ||
66 | #endif | ||
67 | const struct dccp_hdr *dh = dccp_hdr(skb); | 63 | const struct dccp_hdr *dh = dccp_hdr(skb); |
68 | const u8 pkt_type = DCCP_SKB_CB(skb)->dccpd_type; | 64 | const u8 pkt_type = DCCP_SKB_CB(skb)->dccpd_type; |
65 | u64 ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq; | ||
69 | unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb); | 66 | unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb); |
70 | unsigned char *opt_ptr = options; | 67 | unsigned char *opt_ptr = options; |
71 | const unsigned char *opt_end = (unsigned char *)dh + | 68 | const unsigned char *opt_end = (unsigned char *)dh + |
@@ -119,7 +116,7 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | |||
119 | goto out_invalid_option; | 116 | goto out_invalid_option; |
120 | 117 | ||
121 | opt_recv->dccpor_ndp = dccp_decode_value_var(value, len); | 118 | opt_recv->dccpor_ndp = dccp_decode_value_var(value, len); |
122 | dccp_pr_debug("%sNDP count=%d\n", debug_prefix, | 119 | dccp_pr_debug("%s rx opt: NDP count=%d\n", dccp_role(sk), |
123 | opt_recv->dccpor_ndp); | 120 | opt_recv->dccpor_ndp); |
124 | break; | 121 | break; |
125 | case DCCPO_CHANGE_L: | 122 | case DCCPO_CHANGE_L: |
@@ -153,7 +150,7 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | |||
153 | break; | 150 | break; |
154 | 151 | ||
155 | if (dccp_msk(sk)->dccpms_send_ack_vector && | 152 | if (dccp_msk(sk)->dccpms_send_ack_vector && |
156 | dccp_ackvec_parse(sk, skb, opt, value, len)) | 153 | dccp_ackvec_parse(sk, skb, &ackno, opt, value, len)) |
157 | goto out_invalid_option; | 154 | goto out_invalid_option; |
158 | break; | 155 | break; |
159 | case DCCPO_TIMESTAMP: | 156 | case DCCPO_TIMESTAMP: |
@@ -165,8 +162,8 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | |||
165 | dp->dccps_timestamp_echo = opt_recv->dccpor_timestamp; | 162 | dp->dccps_timestamp_echo = opt_recv->dccpor_timestamp; |
166 | dccp_timestamp(sk, &dp->dccps_timestamp_time); | 163 | dccp_timestamp(sk, &dp->dccps_timestamp_time); |
167 | 164 | ||
168 | dccp_pr_debug("%sTIMESTAMP=%u, ackno=%llu\n", | 165 | dccp_pr_debug("%s rx opt: TIMESTAMP=%u, ackno=%llu\n", |
169 | debug_prefix, opt_recv->dccpor_timestamp, | 166 | dccp_role(sk), opt_recv->dccpor_timestamp, |
170 | (unsigned long long) | 167 | (unsigned long long) |
171 | DCCP_SKB_CB(skb)->dccpd_ack_seq); | 168 | DCCP_SKB_CB(skb)->dccpd_ack_seq); |
172 | break; | 169 | break; |
@@ -176,8 +173,8 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | |||
176 | 173 | ||
177 | opt_recv->dccpor_timestamp_echo = ntohl(*(__be32 *)value); | 174 | opt_recv->dccpor_timestamp_echo = ntohl(*(__be32 *)value); |
178 | 175 | ||
179 | dccp_pr_debug("%sTIMESTAMP_ECHO=%u, len=%d, ackno=%llu, ", | 176 | dccp_pr_debug("%s rx opt: TIMESTAMP_ECHO=%u, len=%d, " |
180 | debug_prefix, | 177 | "ackno=%llu, ", dccp_role(sk), |
181 | opt_recv->dccpor_timestamp_echo, | 178 | opt_recv->dccpor_timestamp_echo, |
182 | len + 2, | 179 | len + 2, |
183 | (unsigned long long) | 180 | (unsigned long long) |
@@ -211,8 +208,8 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | |||
211 | if (elapsed_time > opt_recv->dccpor_elapsed_time) | 208 | if (elapsed_time > opt_recv->dccpor_elapsed_time) |
212 | opt_recv->dccpor_elapsed_time = elapsed_time; | 209 | opt_recv->dccpor_elapsed_time = elapsed_time; |
213 | 210 | ||
214 | dccp_pr_debug("%sELAPSED_TIME=%d\n", debug_prefix, | 211 | dccp_pr_debug("%s rx opt: ELAPSED_TIME=%d\n", |
215 | elapsed_time); | 212 | dccp_role(sk), elapsed_time); |
216 | break; | 213 | break; |
217 | /* | 214 | /* |
218 | * From RFC 4340, sec. 10.3: | 215 | * From RFC 4340, sec. 10.3: |
@@ -242,9 +239,8 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | |||
242 | } | 239 | } |
243 | break; | 240 | break; |
244 | default: | 241 | default: |
245 | pr_info("DCCP(%p): option %d(len=%d) not " | 242 | DCCP_CRIT("DCCP(%p): option %d(len=%d) not " |
246 | "implemented, ignoring\n", | 243 | "implemented, ignoring", sk, opt, len); |
247 | sk, opt, len); | ||
248 | break; | 244 | break; |
249 | } | 245 | } |
250 | 246 | ||
@@ -261,7 +257,7 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | |||
261 | out_invalid_option: | 257 | out_invalid_option: |
262 | DCCP_INC_STATS_BH(DCCP_MIB_INVALIDOPT); | 258 | DCCP_INC_STATS_BH(DCCP_MIB_INVALIDOPT); |
263 | DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_OPTION_ERROR; | 259 | DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_OPTION_ERROR; |
264 | pr_info("DCCP(%p): invalid option %d, len=%d\n", sk, opt, len); | 260 | DCCP_WARN("DCCP(%p): invalid option %d, len=%d", sk, opt, len); |
265 | return -1; | 261 | return -1; |
266 | } | 262 | } |
267 | 263 | ||
@@ -451,8 +447,7 @@ static int dccp_insert_feat_opt(struct sk_buff *skb, u8 type, u8 feat, | |||
451 | u8 *to; | 447 | u8 *to; |
452 | 448 | ||
453 | if (DCCP_SKB_CB(skb)->dccpd_opt_len + len + 3 > DCCP_MAX_OPT_LEN) { | 449 | if (DCCP_SKB_CB(skb)->dccpd_opt_len + len + 3 > DCCP_MAX_OPT_LEN) { |
454 | LIMIT_NETDEBUG(KERN_INFO "DCCP: packet too small" | 450 | DCCP_WARN("packet too small for feature %d option!\n", feat); |
455 | " to insert feature %d option!\n", feat); | ||
456 | return -1; | 451 | return -1; |
457 | } | 452 | } |
458 | 453 | ||
@@ -465,8 +460,10 @@ static int dccp_insert_feat_opt(struct sk_buff *skb, u8 type, u8 feat, | |||
465 | 460 | ||
466 | if (len) | 461 | if (len) |
467 | memcpy(to, val, len); | 462 | memcpy(to, val, len); |
468 | dccp_pr_debug("option %d feat %d len %d\n", type, feat, len); | ||
469 | 463 | ||
464 | dccp_pr_debug("%s(%s (%d), ...), length %d\n", | ||
465 | dccp_feat_typename(type), | ||
466 | dccp_feat_name(feat), feat, len); | ||
470 | return 0; | 467 | return 0; |
471 | } | 468 | } |
472 | 469 | ||
diff --git a/net/dccp/output.c b/net/dccp/output.c index 7102e3aed4ca..400c30b6fcae 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c | |||
@@ -88,16 +88,15 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) | |||
88 | return -EPROTO; | 88 | return -EPROTO; |
89 | } | 89 | } |
90 | 90 | ||
91 | skb->h.raw = skb_push(skb, dccp_header_size); | ||
92 | dh = dccp_hdr(skb); | ||
93 | 91 | ||
94 | /* Build DCCP header and checksum it. */ | 92 | /* Build DCCP header and checksum it. */ |
95 | memset(dh, 0, dccp_header_size); | 93 | dh = dccp_zeroed_hdr(skb, dccp_header_size); |
96 | dh->dccph_type = dcb->dccpd_type; | 94 | dh->dccph_type = dcb->dccpd_type; |
97 | dh->dccph_sport = inet->sport; | 95 | dh->dccph_sport = inet->sport; |
98 | dh->dccph_dport = inet->dport; | 96 | dh->dccph_dport = inet->dport; |
99 | dh->dccph_doff = (dccp_header_size + dcb->dccpd_opt_len) / 4; | 97 | dh->dccph_doff = (dccp_header_size + dcb->dccpd_opt_len) / 4; |
100 | dh->dccph_ccval = dcb->dccpd_ccval; | 98 | dh->dccph_ccval = dcb->dccpd_ccval; |
99 | dh->dccph_cscov = dp->dccps_pcslen; | ||
101 | /* XXX For now we're using only 48 bits sequence numbers */ | 100 | /* XXX For now we're using only 48 bits sequence numbers */ |
102 | dh->dccph_x = 1; | 101 | dh->dccph_x = 1; |
103 | 102 | ||
@@ -117,7 +116,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) | |||
117 | break; | 116 | break; |
118 | } | 117 | } |
119 | 118 | ||
120 | icsk->icsk_af_ops->send_check(sk, skb->len, skb); | 119 | icsk->icsk_af_ops->send_check(sk, 0, skb); |
121 | 120 | ||
122 | if (set_ack) | 121 | if (set_ack) |
123 | dccp_event_ack_sent(sk); | 122 | dccp_event_ack_sent(sk); |
@@ -125,17 +124,8 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) | |||
125 | DCCP_INC_STATS(DCCP_MIB_OUTSEGS); | 124 | DCCP_INC_STATS(DCCP_MIB_OUTSEGS); |
126 | 125 | ||
127 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 126 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
128 | err = icsk->icsk_af_ops->queue_xmit(skb, 0); | 127 | err = icsk->icsk_af_ops->queue_xmit(skb, sk, 0); |
129 | if (err <= 0) | 128 | return net_xmit_eval(err); |
130 | return err; | ||
131 | |||
132 | /* NET_XMIT_CN is special. It does not guarantee, | ||
133 | * that this packet is lost. It tells that device | ||
134 | * is about to start to drop packets or already | ||
135 | * drops some packets of the same priority and | ||
136 | * invokes us to send less aggressively. | ||
137 | */ | ||
138 | return err == NET_XMIT_CN ? 0 : err; | ||
139 | } | 129 | } |
140 | return -ENOBUFS; | 130 | return -ENOBUFS; |
141 | } | 131 | } |
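The deleted comment explained why NET_XMIT_CN is not treated as a transmit error: the device accepted the packet but signals congestion. net_xmit_eval() now encodes that policy centrally; roughly (sketch only, with values chosen to mirror NET_XMIT_SUCCESS/NET_XMIT_DROP/NET_XMIT_CN, not the actual kernel macro):

enum { XMIT_SUCCESS = 0, XMIT_DROP = 1, XMIT_CONGESTION = 2 };

/* congestion notification counts as success; anything else is the error it is */
static inline int xmit_eval_sketch(int err)
{
	return err == XMIT_CONGESTION ? 0 : err;
}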
@@ -205,8 +195,7 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, | |||
205 | if (signal_pending(current)) | 195 | if (signal_pending(current)) |
206 | goto do_interrupted; | 196 | goto do_interrupted; |
207 | 197 | ||
208 | rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb, | 198 | rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); |
209 | skb->len); | ||
210 | if (rc <= 0) | 199 | if (rc <= 0) |
211 | break; | 200 | break; |
212 | delay = msecs_to_jiffies(rc); | 201 | delay = msecs_to_jiffies(rc); |
@@ -251,25 +240,23 @@ void dccp_write_xmit(struct sock *sk, int block) | |||
251 | { | 240 | { |
252 | struct dccp_sock *dp = dccp_sk(sk); | 241 | struct dccp_sock *dp = dccp_sk(sk); |
253 | struct sk_buff *skb; | 242 | struct sk_buff *skb; |
254 | long timeo = 30000; /* If a packet is taking longer than 2 secs | 243 | long timeo = DCCP_XMIT_TIMEO; /* If a packet is taking longer than |
255 | we have other issues */ | 244 | this we have other issues */ |
256 | 245 | ||
257 | while ((skb = skb_peek(&sk->sk_write_queue))) { | 246 | while ((skb = skb_peek(&sk->sk_write_queue))) { |
258 | int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb, | 247 | int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); |
259 | skb->len); | ||
260 | 248 | ||
261 | if (err > 0) { | 249 | if (err > 0) { |
262 | if (!block) { | 250 | if (!block) { |
263 | sk_reset_timer(sk, &dp->dccps_xmit_timer, | 251 | sk_reset_timer(sk, &dp->dccps_xmit_timer, |
264 | msecs_to_jiffies(err)+jiffies); | 252 | msecs_to_jiffies(err)+jiffies); |
265 | break; | 253 | break; |
266 | } else | 254 | } else { |
267 | err = dccp_wait_for_ccid(sk, skb, &timeo); | 255 | err = dccp_wait_for_ccid(sk, skb, &timeo); |
268 | if (err) { | 256 | timeo = DCCP_XMIT_TIMEO; |
269 | printk(KERN_CRIT "%s:err at dccp_wait_for_ccid" | ||
270 | " %d\n", __FUNCTION__, err); | ||
271 | dump_stack(); | ||
272 | } | 257 | } |
258 | if (err) | ||
259 | DCCP_BUG("err=%d after dccp_wait_for_ccid", err); | ||
273 | } | 260 | } |
274 | 261 | ||
275 | skb_dequeue(&sk->sk_write_queue); | 262 | skb_dequeue(&sk->sk_write_queue); |
@@ -291,12 +278,9 @@ void dccp_write_xmit(struct sock *sk, int block) | |||
291 | 278 | ||
292 | err = dccp_transmit_skb(sk, skb); | 279 | err = dccp_transmit_skb(sk, skb); |
293 | ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len); | 280 | ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len); |
294 | if (err) { | 281 | if (err) |
295 | printk(KERN_CRIT "%s:err from " | 282 | DCCP_BUG("err=%d after ccid_hc_tx_packet_sent", |
296 | "ccid_hc_tx_packet_sent %d\n", | 283 | err); |
297 | __FUNCTION__, err); | ||
298 | dump_stack(); | ||
299 | } | ||
300 | } else | 284 | } else |
301 | kfree(skb); | 285 | kfree(skb); |
302 | } | 286 | } |
@@ -329,9 +313,10 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst, | |||
329 | skb_reserve(skb, sk->sk_prot->max_header); | 313 | skb_reserve(skb, sk->sk_prot->max_header); |
330 | 314 | ||
331 | skb->dst = dst_clone(dst); | 315 | skb->dst = dst_clone(dst); |
332 | skb->csum = 0; | ||
333 | 316 | ||
334 | dreq = dccp_rsk(req); | 317 | dreq = dccp_rsk(req); |
318 | if (inet_rsk(req)->acked) /* increase ISS upon retransmission */ | ||
319 | dccp_inc_seqno(&dreq->dreq_iss); | ||
335 | DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE; | 320 | DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE; |
336 | DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_iss; | 321 | DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_iss; |
337 | 322 | ||
@@ -340,10 +325,8 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst, | |||
340 | return NULL; | 325 | return NULL; |
341 | } | 326 | } |
342 | 327 | ||
343 | skb->h.raw = skb_push(skb, dccp_header_size); | 328 | /* Build and checksum header */ |
344 | 329 | dh = dccp_zeroed_hdr(skb, dccp_header_size); | |
345 | dh = dccp_hdr(skb); | ||
346 | memset(dh, 0, dccp_header_size); | ||
347 | 330 | ||
348 | dh->dccph_sport = inet_sk(sk)->sport; | 331 | dh->dccph_sport = inet_sk(sk)->sport; |
349 | dh->dccph_dport = inet_rsk(req)->rmt_port; | 332 | dh->dccph_dport = inet_rsk(req)->rmt_port; |
@@ -355,6 +338,10 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst, | |||
355 | dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr); | 338 | dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr); |
356 | dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service; | 339 | dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service; |
357 | 340 | ||
341 | dccp_csum_outgoing(skb); | ||
342 | |||
343 | /* We use `acked' to remember that a Response was already sent. */ | ||
344 | inet_rsk(req)->acked = 1; | ||
358 | DCCP_INC_STATS(DCCP_MIB_OUTSEGS); | 345 | DCCP_INC_STATS(DCCP_MIB_OUTSEGS); |
359 | return skb; | 346 | return skb; |
360 | } | 347 | } |
@@ -379,7 +366,6 @@ static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst, | |||
379 | skb_reserve(skb, sk->sk_prot->max_header); | 366 | skb_reserve(skb, sk->sk_prot->max_header); |
380 | 367 | ||
381 | skb->dst = dst_clone(dst); | 368 | skb->dst = dst_clone(dst); |
382 | skb->csum = 0; | ||
383 | 369 | ||
384 | dccp_inc_seqno(&dp->dccps_gss); | 370 | dccp_inc_seqno(&dp->dccps_gss); |
385 | 371 | ||
@@ -392,10 +378,7 @@ static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst, | |||
392 | return NULL; | 378 | return NULL; |
393 | } | 379 | } |
394 | 380 | ||
395 | skb->h.raw = skb_push(skb, dccp_header_size); | 381 | dh = dccp_zeroed_hdr(skb, dccp_header_size); |
396 | |||
397 | dh = dccp_hdr(skb); | ||
398 | memset(dh, 0, dccp_header_size); | ||
399 | 382 | ||
400 | dh->dccph_sport = inet_sk(sk)->sport; | 383 | dh->dccph_sport = inet_sk(sk)->sport; |
401 | dh->dccph_dport = inet_sk(sk)->dport; | 384 | dh->dccph_dport = inet_sk(sk)->dport; |
@@ -407,7 +390,7 @@ static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst, | |||
407 | dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr); | 390 | dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr); |
408 | 391 | ||
409 | dccp_hdr_reset(skb)->dccph_reset_code = code; | 392 | dccp_hdr_reset(skb)->dccph_reset_code = code; |
410 | inet_csk(sk)->icsk_af_ops->send_check(sk, skb->len, skb); | 393 | inet_csk(sk)->icsk_af_ops->send_check(sk, 0, skb); |
411 | 394 | ||
412 | DCCP_INC_STATS(DCCP_MIB_OUTSEGS); | 395 | DCCP_INC_STATS(DCCP_MIB_OUTSEGS); |
413 | return skb; | 396 | return skb; |
@@ -426,9 +409,8 @@ int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code) | |||
426 | code); | 409 | code); |
427 | if (skb != NULL) { | 410 | if (skb != NULL) { |
428 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 411 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
429 | err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, 0); | 412 | err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, sk, 0); |
430 | if (err == NET_XMIT_CN) | 413 | return net_xmit_eval(err); |
431 | err = 0; | ||
432 | } | 414 | } |
433 | } | 415 | } |
434 | 416 | ||
@@ -449,7 +431,6 @@ static inline void dccp_connect_init(struct sock *sk) | |||
449 | 431 | ||
450 | dccp_sync_mss(sk, dst_mtu(dst)); | 432 | dccp_sync_mss(sk, dst_mtu(dst)); |
451 | 433 | ||
452 | dccp_update_gss(sk, dp->dccps_iss); | ||
453 | /* | 434 | /* |
454 | * SWL and AWL are initially adjusted so that they are not less than | 435 | * SWL and AWL are initially adjusted so that they are not less than |
455 | * the initial Sequence Numbers received and sent, respectively: | 436 | * the initial Sequence Numbers received and sent, respectively: |
@@ -458,8 +439,13 @@ static inline void dccp_connect_init(struct sock *sk) | |||
458 | * These adjustments MUST be applied only at the beginning of the | 439 | * These adjustments MUST be applied only at the beginning of the |
459 | * connection. | 440 | * connection. |
460 | */ | 441 | */ |
442 | dccp_update_gss(sk, dp->dccps_iss); | ||
461 | dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss)); | 443 | dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss)); |
462 | 444 | ||
445 | /* S.GAR - greatest valid acknowledgement number received on a non-Sync; | ||
446 | * initialized to S.ISS (sec. 8.5) */ | ||
447 | dp->dccps_gar = dp->dccps_iss; | ||
448 | |||
463 | icsk->icsk_retransmits = 0; | 449 | icsk->icsk_retransmits = 0; |
464 | init_timer(&dp->dccps_xmit_timer); | 450 | init_timer(&dp->dccps_xmit_timer); |
465 | dp->dccps_xmit_timer.data = (unsigned long)sk; | 451 | dp->dccps_xmit_timer.data = (unsigned long)sk; |
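The comment block above quotes the rule that SWL and AWL must not start below the initial sequence numbers, and the added line initializes S.GAR to S.ISS per sec. 8.5. Reduced to a toy model (invented struct, not kernel code):

#include <stdint.h>

struct initial_state {
	uint64_t iss;	/* initial sequence number sent */
	uint64_t awl;	/* acknowledgement window low */
	uint64_t gar;	/* greatest ack received on a non-Sync */
};

static void apply_initial_adjustments(struct initial_state *s)
{
	if (s->awl < s->iss)	/* max48(AWL, ISS) on real 48-bit seqnos */
		s->awl = s->iss;
	s->gar = s->iss;	/* S.GAR := S.ISS */
}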
@@ -481,7 +467,6 @@ int dccp_connect(struct sock *sk) | |||
481 | skb_reserve(skb, sk->sk_prot->max_header); | 467 | skb_reserve(skb, sk->sk_prot->max_header); |
482 | 468 | ||
483 | DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST; | 469 | DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST; |
484 | skb->csum = 0; | ||
485 | 470 | ||
486 | dccp_skb_entail(sk, skb); | 471 | dccp_skb_entail(sk, skb); |
487 | dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL)); | 472 | dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL)); |
@@ -513,7 +498,6 @@ void dccp_send_ack(struct sock *sk) | |||
513 | 498 | ||
514 | /* Reserve space for headers */ | 499 | /* Reserve space for headers */ |
515 | skb_reserve(skb, sk->sk_prot->max_header); | 500 | skb_reserve(skb, sk->sk_prot->max_header); |
516 | skb->csum = 0; | ||
517 | DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK; | 501 | DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK; |
518 | dccp_transmit_skb(sk, skb); | 502 | dccp_transmit_skb(sk, skb); |
519 | } | 503 | } |
@@ -567,7 +551,6 @@ void dccp_send_sync(struct sock *sk, const u64 seq, | |||
567 | 551 | ||
568 | /* Reserve space for headers and prepare control bits. */ | 552 | /* Reserve space for headers and prepare control bits. */ |
569 | skb_reserve(skb, sk->sk_prot->max_header); | 553 | skb_reserve(skb, sk->sk_prot->max_header); |
570 | skb->csum = 0; | ||
571 | DCCP_SKB_CB(skb)->dccpd_type = pkt_type; | 554 | DCCP_SKB_CB(skb)->dccpd_type = pkt_type; |
572 | DCCP_SKB_CB(skb)->dccpd_seq = seq; | 555 | DCCP_SKB_CB(skb)->dccpd_seq = seq; |
573 | 556 | ||
@@ -593,7 +576,6 @@ void dccp_send_close(struct sock *sk, const int active) | |||
593 | 576 | ||
594 | /* Reserve space for headers and prepare control bits. */ | 577 | /* Reserve space for headers and prepare control bits. */ |
595 | skb_reserve(skb, sk->sk_prot->max_header); | 578 | skb_reserve(skb, sk->sk_prot->max_header); |
596 | skb->csum = 0; | ||
597 | DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ? | 579 | DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ? |
598 | DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ; | 580 | DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ; |
599 | 581 | ||
diff --git a/net/dccp/probe.c b/net/dccp/probe.c index 146496fce2e2..f81e37de35d5 100644 --- a/net/dccp/probe.c +++ b/net/dccp/probe.c | |||
@@ -106,8 +106,10 @@ static int jdccp_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
106 | } | 106 | } |
107 | 107 | ||
108 | static struct jprobe dccp_send_probe = { | 108 | static struct jprobe dccp_send_probe = { |
109 | .kp = { .addr = (kprobe_opcode_t *)&dccp_sendmsg, }, | 109 | .kp = { |
110 | .entry = (kprobe_opcode_t *)&jdccp_sendmsg, | 110 | .symbol_name = "dccp_sendmsg", |
111 | }, | ||
112 | .entry = JPROBE_ENTRY(jdccp_sendmsg), | ||
111 | }; | 113 | }; |
112 | 114 | ||
113 | static int dccpprobe_open(struct inode *inode, struct file *file) | 115 | static int dccpprobe_open(struct inode *inode, struct file *file) |
@@ -160,6 +162,8 @@ static __init int dccpprobe_init(void) | |||
160 | init_waitqueue_head(&dccpw.wait); | 162 | init_waitqueue_head(&dccpw.wait); |
161 | spin_lock_init(&dccpw.lock); | 163 | spin_lock_init(&dccpw.lock); |
162 | dccpw.fifo = kfifo_alloc(bufsize, GFP_KERNEL, &dccpw.lock); | 164 | dccpw.fifo = kfifo_alloc(bufsize, GFP_KERNEL, &dccpw.lock); |
165 | if (IS_ERR(dccpw.fifo)) | ||
166 | return PTR_ERR(dccpw.fifo); | ||
163 | 167 | ||
164 | if (!proc_net_fops_create(procname, S_IRUSR, &dccpprobe_fops)) | 168 | if (!proc_net_fops_create(procname, S_IRUSR, &dccpprobe_fops)) |
165 | goto err0; | 169 | goto err0; |
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 72cbdcfc2c65..5ec47d9ee447 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
@@ -52,6 +52,9 @@ struct inet_hashinfo __cacheline_aligned dccp_hashinfo = { | |||
52 | 52 | ||
53 | EXPORT_SYMBOL_GPL(dccp_hashinfo); | 53 | EXPORT_SYMBOL_GPL(dccp_hashinfo); |
54 | 54 | ||
55 | /* the maximum queue length for tx in packets. 0 is no limit */ | ||
56 | int sysctl_dccp_tx_qlen __read_mostly = 5; | ||
57 | |||
55 | void dccp_set_state(struct sock *sk, const int state) | 58 | void dccp_set_state(struct sock *sk, const int state) |
56 | { | 59 | { |
57 | const int oldstate = sk->sk_state; | 60 | const int oldstate = sk->sk_state; |
@@ -212,6 +215,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) | |||
212 | 215 | ||
213 | dccp_init_xmit_timers(sk); | 216 | dccp_init_xmit_timers(sk); |
214 | icsk->icsk_rto = DCCP_TIMEOUT_INIT; | 217 | icsk->icsk_rto = DCCP_TIMEOUT_INIT; |
218 | icsk->icsk_syn_retries = sysctl_dccp_request_retries; | ||
215 | sk->sk_state = DCCP_CLOSED; | 219 | sk->sk_state = DCCP_CLOSED; |
216 | sk->sk_write_space = dccp_write_space; | 220 | sk->sk_write_space = dccp_write_space; |
217 | icsk->icsk_sync_mss = dccp_sync_mss; | 221 | icsk->icsk_sync_mss = dccp_sync_mss; |
@@ -262,12 +266,12 @@ int dccp_destroy_sock(struct sock *sk) | |||
262 | 266 | ||
263 | EXPORT_SYMBOL_GPL(dccp_destroy_sock); | 267 | EXPORT_SYMBOL_GPL(dccp_destroy_sock); |
264 | 268 | ||
265 | static inline int dccp_listen_start(struct sock *sk) | 269 | static inline int dccp_listen_start(struct sock *sk, int backlog) |
266 | { | 270 | { |
267 | struct dccp_sock *dp = dccp_sk(sk); | 271 | struct dccp_sock *dp = dccp_sk(sk); |
268 | 272 | ||
269 | dp->dccps_role = DCCP_ROLE_LISTEN; | 273 | dp->dccps_role = DCCP_ROLE_LISTEN; |
270 | return inet_csk_listen_start(sk, TCP_SYNQ_HSIZE); | 274 | return inet_csk_listen_start(sk, backlog); |
271 | } | 275 | } |
272 | 276 | ||
273 | int dccp_disconnect(struct sock *sk, int flags) | 277 | int dccp_disconnect(struct sock *sk, int flags) |
@@ -451,9 +455,8 @@ out_free_val: | |||
451 | static int do_dccp_setsockopt(struct sock *sk, int level, int optname, | 455 | static int do_dccp_setsockopt(struct sock *sk, int level, int optname, |
452 | char __user *optval, int optlen) | 456 | char __user *optval, int optlen) |
453 | { | 457 | { |
454 | struct dccp_sock *dp; | 458 | struct dccp_sock *dp = dccp_sk(sk); |
455 | int err; | 459 | int val, err = 0; |
456 | int val; | ||
457 | 460 | ||
458 | if (optlen < sizeof(int)) | 461 | if (optlen < sizeof(int)) |
459 | return -EINVAL; | 462 | return -EINVAL; |
@@ -465,14 +468,11 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname, | |||
465 | return dccp_setsockopt_service(sk, val, optval, optlen); | 468 | return dccp_setsockopt_service(sk, val, optval, optlen); |
466 | 469 | ||
467 | lock_sock(sk); | 470 | lock_sock(sk); |
468 | dp = dccp_sk(sk); | ||
469 | err = 0; | ||
470 | |||
471 | switch (optname) { | 471 | switch (optname) { |
472 | case DCCP_SOCKOPT_PACKET_SIZE: | 472 | case DCCP_SOCKOPT_PACKET_SIZE: |
473 | dp->dccps_packet_size = val; | 473 | DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n"); |
474 | err = 0; | ||
474 | break; | 475 | break; |
475 | |||
476 | case DCCP_SOCKOPT_CHANGE_L: | 476 | case DCCP_SOCKOPT_CHANGE_L: |
477 | if (optlen != sizeof(struct dccp_so_feat)) | 477 | if (optlen != sizeof(struct dccp_so_feat)) |
478 | err = -EINVAL; | 478 | err = -EINVAL; |
@@ -481,7 +481,6 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname, | |||
481 | (struct dccp_so_feat __user *) | 481 | (struct dccp_so_feat __user *) |
482 | optval); | 482 | optval); |
483 | break; | 483 | break; |
484 | |||
485 | case DCCP_SOCKOPT_CHANGE_R: | 484 | case DCCP_SOCKOPT_CHANGE_R: |
486 | if (optlen != sizeof(struct dccp_so_feat)) | 485 | if (optlen != sizeof(struct dccp_so_feat)) |
487 | err = -EINVAL; | 486 | err = -EINVAL; |
@@ -490,12 +489,26 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname, | |||
490 | (struct dccp_so_feat __user *) | 489 | (struct dccp_so_feat __user *) |
491 | optval); | 490 | optval); |
492 | break; | 491 | break; |
493 | 492 | case DCCP_SOCKOPT_SEND_CSCOV: /* sender side, RFC 4340, sec. 9.2 */ | |
493 | if (val < 0 || val > 15) | ||
494 | err = -EINVAL; | ||
495 | else | ||
496 | dp->dccps_pcslen = val; | ||
497 | break; | ||
498 | case DCCP_SOCKOPT_RECV_CSCOV: /* receiver side, RFC 4340 sec. 9.2.1 */ | ||
499 | if (val < 0 || val > 15) | ||
500 | err = -EINVAL; | ||
501 | else { | ||
502 | dp->dccps_pcrlen = val; | ||
503 | /* FIXME: add feature negotiation, | ||
504 | * ChangeL(MinimumChecksumCoverage, val) */ | ||
505 | } | ||
506 | break; | ||
494 | default: | 507 | default: |
495 | err = -ENOPROTOOPT; | 508 | err = -ENOPROTOOPT; |
496 | break; | 509 | break; |
497 | } | 510 | } |
498 | 511 | ||
499 | release_sock(sk); | 512 | release_sock(sk); |
500 | return err; | 513 | return err; |
501 | } | 514 | } |
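The two new socket options above expose partial checksum coverage (RFC 4340, sec. 9.2 and 9.2.1) to applications: SEND_CSCOV limits what the sender checksums, RECV_CSCOV sets the receiver's MinCsCov. A hedged user-space illustration; the fallback defines reflect the linux/dccp.h values of this period and the coverage value is arbitrary:

#include <sys/socket.h>

#ifndef SOL_DCCP
#define SOL_DCCP 269
#endif
#ifndef DCCP_SOCKOPT_SEND_CSCOV
#define DCCP_SOCKOPT_SEND_CSCOV 10
#define DCCP_SOCKOPT_RECV_CSCOV 11
#endif

/* Sender covers only the DCCP header (CsCov = 1); receiver always accepts
 * fully-covered packets and accepts partially-covered ones whose CsCov is
 * at least this MinCsCov.
 */
static int set_partial_checksums(int tx_fd, int rx_fd)
{
	int cscov = 1;

	if (setsockopt(tx_fd, SOL_DCCP, DCCP_SOCKOPT_SEND_CSCOV,
		       &cscov, sizeof(cscov)) < 0)
		return -1;
	return setsockopt(rx_fd, SOL_DCCP, DCCP_SOCKOPT_RECV_CSCOV,
			  &cscov, sizeof(cscov));
}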
@@ -569,12 +582,17 @@ static int do_dccp_getsockopt(struct sock *sk, int level, int optname, | |||
569 | 582 | ||
570 | switch (optname) { | 583 | switch (optname) { |
571 | case DCCP_SOCKOPT_PACKET_SIZE: | 584 | case DCCP_SOCKOPT_PACKET_SIZE: |
572 | val = dp->dccps_packet_size; | 585 | DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n"); |
573 | len = sizeof(dp->dccps_packet_size); | 586 | return 0; |
574 | break; | ||
575 | case DCCP_SOCKOPT_SERVICE: | 587 | case DCCP_SOCKOPT_SERVICE: |
576 | return dccp_getsockopt_service(sk, len, | 588 | return dccp_getsockopt_service(sk, len, |
577 | (__be32 __user *)optval, optlen); | 589 | (__be32 __user *)optval, optlen); |
590 | case DCCP_SOCKOPT_SEND_CSCOV: | ||
591 | val = dp->dccps_pcslen; | ||
592 | break; | ||
593 | case DCCP_SOCKOPT_RECV_CSCOV: | ||
594 | val = dp->dccps_pcrlen; | ||
595 | break; | ||
578 | case 128 ... 191: | 596 | case 128 ... 191: |
579 | return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname, | 597 | return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname, |
580 | len, (u32 __user *)optval, optlen); | 598 | len, (u32 __user *)optval, optlen); |
@@ -630,6 +648,13 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
630 | return -EMSGSIZE; | 648 | return -EMSGSIZE; |
631 | 649 | ||
632 | lock_sock(sk); | 650 | lock_sock(sk); |
651 | |||
652 | if (sysctl_dccp_tx_qlen && | ||
653 | (sk->sk_write_queue.qlen >= sysctl_dccp_tx_qlen)) { | ||
654 | rc = -EAGAIN; | ||
655 | goto out_release; | ||
656 | } | ||
657 | |||
633 | timeo = sock_sndtimeo(sk, noblock); | 658 | timeo = sock_sndtimeo(sk, noblock); |
634 | 659 | ||
635 | /* | 660 | /* |
@@ -788,7 +813,7 @@ int inet_dccp_listen(struct socket *sock, int backlog) | |||
788 | * FIXME: here it probably should be sk->sk_prot->listen_start | 813 | * FIXME: here it probably should be sk->sk_prot->listen_start |
789 | * see tcp_listen_start | 814 | * see tcp_listen_start |
790 | */ | 815 | */ |
791 | err = dccp_listen_start(sk); | 816 | err = dccp_listen_start(sk, backlog); |
792 | if (err) | 817 | if (err) |
793 | goto out; | 818 | goto out; |
794 | } | 819 | } |
@@ -1008,8 +1033,7 @@ static int __init dccp_init(void) | |||
1008 | } while (!dccp_hashinfo.ehash && --ehash_order > 0); | 1033 | } while (!dccp_hashinfo.ehash && --ehash_order > 0); |
1009 | 1034 | ||
1010 | if (!dccp_hashinfo.ehash) { | 1035 | if (!dccp_hashinfo.ehash) { |
1011 | printk(KERN_CRIT "Failed to allocate DCCP " | 1036 | DCCP_CRIT("Failed to allocate DCCP established hash table"); |
1012 | "established hash table\n"); | ||
1013 | goto out_free_bind_bucket_cachep; | 1037 | goto out_free_bind_bucket_cachep; |
1014 | } | 1038 | } |
1015 | 1039 | ||
@@ -1031,7 +1055,7 @@ static int __init dccp_init(void) | |||
1031 | } while (!dccp_hashinfo.bhash && --bhash_order >= 0); | 1055 | } while (!dccp_hashinfo.bhash && --bhash_order >= 0); |
1032 | 1056 | ||
1033 | if (!dccp_hashinfo.bhash) { | 1057 | if (!dccp_hashinfo.bhash) { |
1034 | printk(KERN_CRIT "Failed to allocate DCCP bind hash table\n"); | 1058 | DCCP_CRIT("Failed to allocate DCCP bind hash table"); |
1035 | goto out_free_dccp_ehash; | 1059 | goto out_free_dccp_ehash; |
1036 | } | 1060 | } |
1037 | 1061 | ||
diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c index 38bc157876f3..fdcfca3e9208 100644 --- a/net/dccp/sysctl.c +++ b/net/dccp/sysctl.c | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
13 | #include <linux/sysctl.h> | 13 | #include <linux/sysctl.h> |
14 | #include "dccp.h" | ||
14 | #include "feat.h" | 15 | #include "feat.h" |
15 | 16 | ||
16 | #ifndef CONFIG_SYSCTL | 17 | #ifndef CONFIG_SYSCTL |
@@ -19,53 +20,76 @@ | |||
19 | 20 | ||
20 | static struct ctl_table dccp_default_table[] = { | 21 | static struct ctl_table dccp_default_table[] = { |
21 | { | 22 | { |
22 | .ctl_name = NET_DCCP_DEFAULT_SEQ_WINDOW, | ||
23 | .procname = "seq_window", | 23 | .procname = "seq_window", |
24 | .data = &dccp_feat_default_sequence_window, | 24 | .data = &sysctl_dccp_feat_sequence_window, |
25 | .maxlen = sizeof(dccp_feat_default_sequence_window), | 25 | .maxlen = sizeof(sysctl_dccp_feat_sequence_window), |
26 | .mode = 0644, | 26 | .mode = 0644, |
27 | .proc_handler = proc_dointvec, | 27 | .proc_handler = proc_dointvec, |
28 | }, | 28 | }, |
29 | { | 29 | { |
30 | .ctl_name = NET_DCCP_DEFAULT_RX_CCID, | ||
31 | .procname = "rx_ccid", | 30 | .procname = "rx_ccid", |
32 | .data = &dccp_feat_default_rx_ccid, | 31 | .data = &sysctl_dccp_feat_rx_ccid, |
33 | .maxlen = sizeof(dccp_feat_default_rx_ccid), | 32 | .maxlen = sizeof(sysctl_dccp_feat_rx_ccid), |
34 | .mode = 0644, | 33 | .mode = 0644, |
35 | .proc_handler = proc_dointvec, | 34 | .proc_handler = proc_dointvec, |
36 | }, | 35 | }, |
37 | { | 36 | { |
38 | .ctl_name = NET_DCCP_DEFAULT_TX_CCID, | ||
39 | .procname = "tx_ccid", | 37 | .procname = "tx_ccid", |
40 | .data = &dccp_feat_default_tx_ccid, | 38 | .data = &sysctl_dccp_feat_tx_ccid, |
41 | .maxlen = sizeof(dccp_feat_default_tx_ccid), | 39 | .maxlen = sizeof(sysctl_dccp_feat_tx_ccid), |
42 | .mode = 0644, | 40 | .mode = 0644, |
43 | .proc_handler = proc_dointvec, | 41 | .proc_handler = proc_dointvec, |
44 | }, | 42 | }, |
45 | { | 43 | { |
46 | .ctl_name = NET_DCCP_DEFAULT_ACK_RATIO, | ||
47 | .procname = "ack_ratio", | 44 | .procname = "ack_ratio", |
48 | .data = &dccp_feat_default_ack_ratio, | 45 | .data = &sysctl_dccp_feat_ack_ratio, |
49 | .maxlen = sizeof(dccp_feat_default_ack_ratio), | 46 | .maxlen = sizeof(sysctl_dccp_feat_ack_ratio), |
50 | .mode = 0644, | 47 | .mode = 0644, |
51 | .proc_handler = proc_dointvec, | 48 | .proc_handler = proc_dointvec, |
52 | }, | 49 | }, |
53 | { | 50 | { |
54 | .ctl_name = NET_DCCP_DEFAULT_SEND_ACKVEC, | ||
55 | .procname = "send_ackvec", | 51 | .procname = "send_ackvec", |
56 | .data = &dccp_feat_default_send_ack_vector, | 52 | .data = &sysctl_dccp_feat_send_ack_vector, |
57 | .maxlen = sizeof(dccp_feat_default_send_ack_vector), | 53 | .maxlen = sizeof(sysctl_dccp_feat_send_ack_vector), |
58 | .mode = 0644, | 54 | .mode = 0644, |
59 | .proc_handler = proc_dointvec, | 55 | .proc_handler = proc_dointvec, |
60 | }, | 56 | }, |
61 | { | 57 | { |
62 | .ctl_name = NET_DCCP_DEFAULT_SEND_NDP, | ||
63 | .procname = "send_ndp", | 58 | .procname = "send_ndp", |
64 | .data = &dccp_feat_default_send_ndp_count, | 59 | .data = &sysctl_dccp_feat_send_ndp_count, |
65 | .maxlen = sizeof(dccp_feat_default_send_ndp_count), | 60 | .maxlen = sizeof(sysctl_dccp_feat_send_ndp_count), |
66 | .mode = 0644, | 61 | .mode = 0644, |
67 | .proc_handler = proc_dointvec, | 62 | .proc_handler = proc_dointvec, |
68 | }, | 63 | }, |
64 | { | ||
65 | .procname = "request_retries", | ||
66 | .data = &sysctl_dccp_request_retries, | ||
67 | .maxlen = sizeof(sysctl_dccp_request_retries), | ||
68 | .mode = 0644, | ||
69 | .proc_handler = proc_dointvec, | ||
70 | }, | ||
71 | { | ||
72 | .procname = "retries1", | ||
73 | .data = &sysctl_dccp_retries1, | ||
74 | .maxlen = sizeof(sysctl_dccp_retries1), | ||
75 | .mode = 0644, | ||
76 | .proc_handler = proc_dointvec, | ||
77 | }, | ||
78 | { | ||
79 | .procname = "retries2", | ||
80 | .data = &sysctl_dccp_retries2, | ||
81 | .maxlen = sizeof(sysctl_dccp_retries2), | ||
82 | .mode = 0644, | ||
83 | .proc_handler = proc_dointvec, | ||
84 | }, | ||
85 | { | ||
86 | .procname = "tx_qlen", | ||
87 | .data = &sysctl_dccp_tx_qlen, | ||
88 | .maxlen = sizeof(sysctl_dccp_tx_qlen), | ||
89 | .mode = 0644, | ||
90 | .proc_handler = proc_dointvec, | ||
91 | }, | ||
92 | |||
69 | { .ctl_name = 0, } | 93 | { .ctl_name = 0, } |
70 | }; | 94 | }; |
71 | 95 | ||
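[Editor's note] The new ctl_table entries add connection-level tunables (request_retries, retries1, retries2, tx_qlen) next to the existing feature defaults. A small user-space sketch for exercising one of them follows; it assumes the table is exposed under /proc/sys/net/dccp/default/ like the pre-existing seq_window and ccid entries, which is an assumption about where this table is registered.

/* Hypothetical sketch: read and update one of the new DCCP sysctls
 * from user space. The /proc path is assumed, not taken from the diff. */
#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/net/dccp/default/retries2";
	char buf[32];
	FILE *f = fopen(path, "r");

	if (!f || !fgets(buf, sizeof(buf), f)) {
		perror(path);
		return 1;
	}
	fclose(f);
	printf("retries2 = %s", buf);

	f = fopen(path, "w");		/* writing needs root privileges */
	if (f) {
		fprintf(f, "10\n");	/* example value only */
		fclose(f);
	}
	return 0;
}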
diff --git a/net/dccp/timer.c b/net/dccp/timer.c index 8447742f5615..e8f519e7f481 100644 --- a/net/dccp/timer.c +++ b/net/dccp/timer.c | |||
@@ -15,15 +15,10 @@ | |||
15 | 15 | ||
16 | #include "dccp.h" | 16 | #include "dccp.h" |
17 | 17 | ||
18 | static void dccp_write_timer(unsigned long data); | 18 | /* sysctl variables governing numbers of retransmission attempts */ |
19 | static void dccp_keepalive_timer(unsigned long data); | 19 | int sysctl_dccp_request_retries __read_mostly = TCP_SYN_RETRIES; |
20 | static void dccp_delack_timer(unsigned long data); | 20 | int sysctl_dccp_retries1 __read_mostly = TCP_RETR1; |
21 | 21 | int sysctl_dccp_retries2 __read_mostly = TCP_RETR2; | |
22 | void dccp_init_xmit_timers(struct sock *sk) | ||
23 | { | ||
24 | inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer, | ||
25 | &dccp_keepalive_timer); | ||
26 | } | ||
27 | 22 | ||
28 | static void dccp_write_err(struct sock *sk) | 23 | static void dccp_write_err(struct sock *sk) |
29 | { | 24 | { |
@@ -44,11 +39,10 @@ static int dccp_write_timeout(struct sock *sk) | |||
44 | if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) { | 39 | if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) { |
45 | if (icsk->icsk_retransmits != 0) | 40 | if (icsk->icsk_retransmits != 0) |
46 | dst_negative_advice(&sk->sk_dst_cache); | 41 | dst_negative_advice(&sk->sk_dst_cache); |
47 | retry_until = icsk->icsk_syn_retries ? : | 42 | retry_until = icsk->icsk_syn_retries ? |
48 | /* FIXME! */ 3 /* FIXME! sysctl_tcp_syn_retries */; | 43 | : sysctl_dccp_request_retries; |
49 | } else { | 44 | } else { |
50 | if (icsk->icsk_retransmits >= | 45 | if (icsk->icsk_retransmits >= sysctl_dccp_retries1) { |
51 | /* FIXME! sysctl_tcp_retries1 */ 5 /* FIXME! */) { | ||
52 | /* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu | 46 | /* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu |
53 | black hole detection. :-( | 47 | black hole detection. :-( |
54 | 48 | ||
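[Editor's note] The retry_until assignment above uses the GNU C "a ? : b" shorthand: it evaluates to a when a is non-zero and to b otherwise, so a per-socket icsk_syn_retries override wins and the new sysctl_dccp_request_retries acts as the fallback. A standalone sketch of that operator (names hypothetical, compile with gcc):

/* Sketch of the GCC "a ? : b" shorthand used for retry_until:
 * yields a when a is non-zero, otherwise b; a is evaluated once. */
#include <stdio.h>

static int pick(int per_socket_override, int sysctl_default)
{
	return per_socket_override ? : sysctl_default;
}

int main(void)
{
	printf("%d\n", pick(0, 5));	/* no override -> 5 (the default) */
	printf("%d\n", pick(2, 5));	/* override set -> 2 */
	return 0;
}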
@@ -72,7 +66,7 @@ static int dccp_write_timeout(struct sock *sk) | |||
72 | dst_negative_advice(&sk->sk_dst_cache); | 66 | dst_negative_advice(&sk->sk_dst_cache); |
73 | } | 67 | } |
74 | 68 | ||
75 | retry_until = /* FIXME! */ 15 /* FIXME! sysctl_tcp_retries2 */; | 69 | retry_until = sysctl_dccp_retries2; |
76 | /* | 70 | /* |
77 | * FIXME: see tcp_write_timout and tcp_out_of_resources | 71 | * FIXME: see tcp_write_timout and tcp_out_of_resources |
78 | */ | 72 | */ |
@@ -86,53 +80,6 @@ static int dccp_write_timeout(struct sock *sk) | |||
86 | return 0; | 80 | return 0; |
87 | } | 81 | } |
88 | 82 | ||
89 | /* This is the same as tcp_delack_timer, sans prequeue & mem_reclaim stuff */ | ||
90 | static void dccp_delack_timer(unsigned long data) | ||
91 | { | ||
92 | struct sock *sk = (struct sock *)data; | ||
93 | struct inet_connection_sock *icsk = inet_csk(sk); | ||
94 | |||
95 | bh_lock_sock(sk); | ||
96 | if (sock_owned_by_user(sk)) { | ||
97 | /* Try again later. */ | ||
98 | icsk->icsk_ack.blocked = 1; | ||
99 | NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED); | ||
100 | sk_reset_timer(sk, &icsk->icsk_delack_timer, | ||
101 | jiffies + TCP_DELACK_MIN); | ||
102 | goto out; | ||
103 | } | ||
104 | |||
105 | if (sk->sk_state == DCCP_CLOSED || | ||
106 | !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) | ||
107 | goto out; | ||
108 | if (time_after(icsk->icsk_ack.timeout, jiffies)) { | ||
109 | sk_reset_timer(sk, &icsk->icsk_delack_timer, | ||
110 | icsk->icsk_ack.timeout); | ||
111 | goto out; | ||
112 | } | ||
113 | |||
114 | icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER; | ||
115 | |||
116 | if (inet_csk_ack_scheduled(sk)) { | ||
117 | if (!icsk->icsk_ack.pingpong) { | ||
118 | /* Delayed ACK missed: inflate ATO. */ | ||
119 | icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, | ||
120 | icsk->icsk_rto); | ||
121 | } else { | ||
122 | /* Delayed ACK missed: leave pingpong mode and | ||
123 | * deflate ATO. | ||
124 | */ | ||
125 | icsk->icsk_ack.pingpong = 0; | ||
126 | icsk->icsk_ack.ato = TCP_ATO_MIN; | ||
127 | } | ||
128 | dccp_send_ack(sk); | ||
129 | NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS); | ||
130 | } | ||
131 | out: | ||
132 | bh_unlock_sock(sk); | ||
133 | sock_put(sk); | ||
134 | } | ||
135 | |||
136 | /* | 83 | /* |
137 | * The DCCP retransmit timer. | 84 | * The DCCP retransmit timer. |
138 | */ | 85 | */ |
@@ -142,7 +89,7 @@ static void dccp_retransmit_timer(struct sock *sk) | |||
142 | 89 | ||
143 | /* retransmit timer is used for feature negotiation throughout | 90 | /* retransmit timer is used for feature negotiation throughout |
144 | * connection. In this case, no packet is re-transmitted, but rather an | 91 | * connection. In this case, no packet is re-transmitted, but rather an |
145 | * ack is generated and pending changes are splaced into its options. | 92 | * ack is generated and pending changes are placed into its options. |
146 | */ | 93 | */ |
147 | if (sk->sk_send_head == NULL) { | 94 | if (sk->sk_send_head == NULL) { |
148 | dccp_pr_debug("feat negotiation retransmit timeout %p\n", sk); | 95 | dccp_pr_debug("feat negotiation retransmit timeout %p\n", sk); |
@@ -154,9 +101,11 @@ static void dccp_retransmit_timer(struct sock *sk) | |||
154 | /* | 101 | /* |
155 | * sk->sk_send_head has to have one skb with | 102 | * sk->sk_send_head has to have one skb with |
156 | * DCCP_SKB_CB(skb)->dccpd_type set to one of the retransmittable DCCP | 103 | * DCCP_SKB_CB(skb)->dccpd_type set to one of the retransmittable DCCP |
157 | * packet types (REQUEST, RESPONSE, the ACK in the 3way handshake | 104 | * packet types. The only packets eligible for retransmission are: |
158 | * (PARTOPEN timer), etc). | 105 | * -- Requests in client-REQUEST state (sec. 8.1.1) |
159 | */ | 106 | * -- Acks in client-PARTOPEN state (sec. 8.1.5) |
107 | * -- CloseReq in server-CLOSEREQ state (sec. 8.3) | ||
108 | * -- Close in node-CLOSING state (sec. 8.3) */ | ||
160 | BUG_TRAP(sk->sk_send_head != NULL); | 109 | BUG_TRAP(sk->sk_send_head != NULL); |
161 | 110 | ||
162 | /* | 111 | /* |
@@ -194,7 +143,7 @@ backoff: | |||
194 | icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX); | 143 | icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX); |
195 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, | 144 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, |
196 | DCCP_RTO_MAX); | 145 | DCCP_RTO_MAX); |
197 | if (icsk->icsk_retransmits > 3 /* FIXME: sysctl_dccp_retries1 */) | 146 | if (icsk->icsk_retransmits > sysctl_dccp_retries1) |
198 | __sk_dst_reset(sk); | 147 | __sk_dst_reset(sk); |
199 | out:; | 148 | out:; |
200 | } | 149 | } |
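[Editor's note] The backoff path doubles the retransmission timeout and caps it at DCCP_RTO_MAX, and once icsk_retransmits exceeds the new sysctl_dccp_retries1 the cached route is reset. The sketch below illustrates the doubling-with-cap behaviour; the cap value is illustrative, not the kernel's DCCP_RTO_MAX.

/* Illustrative sketch of exponential RTO backoff with an upper cap,
 * mirroring icsk_rto = min(icsk_rto << 1, DCCP_RTO_MAX) above. */
#include <stdio.h>

int main(void)
{
	const unsigned int rto_max = 64;	/* stand-in for DCCP_RTO_MAX */
	unsigned int rto = 1;

	for (int attempt = 1; attempt <= 8; attempt++) {
		rto <<= 1;
		if (rto > rto_max)
			rto = rto_max;
		printf("attempt %d: rto = %u\n", attempt, rto);
	}
	return 0;
}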
@@ -264,3 +213,56 @@ out: | |||
264 | bh_unlock_sock(sk); | 213 | bh_unlock_sock(sk); |
265 | sock_put(sk); | 214 | sock_put(sk); |
266 | } | 215 | } |
216 | |||
217 | /* This is the same as tcp_delack_timer, sans prequeue & mem_reclaim stuff */ | ||
218 | static void dccp_delack_timer(unsigned long data) | ||
219 | { | ||
220 | struct sock *sk = (struct sock *)data; | ||
221 | struct inet_connection_sock *icsk = inet_csk(sk); | ||
222 | |||
223 | bh_lock_sock(sk); | ||
224 | if (sock_owned_by_user(sk)) { | ||
225 | /* Try again later. */ | ||
226 | icsk->icsk_ack.blocked = 1; | ||
227 | NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED); | ||
228 | sk_reset_timer(sk, &icsk->icsk_delack_timer, | ||
229 | jiffies + TCP_DELACK_MIN); | ||
230 | goto out; | ||
231 | } | ||
232 | |||
233 | if (sk->sk_state == DCCP_CLOSED || | ||
234 | !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) | ||
235 | goto out; | ||
236 | if (time_after(icsk->icsk_ack.timeout, jiffies)) { | ||
237 | sk_reset_timer(sk, &icsk->icsk_delack_timer, | ||
238 | icsk->icsk_ack.timeout); | ||
239 | goto out; | ||
240 | } | ||
241 | |||
242 | icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER; | ||
243 | |||
244 | if (inet_csk_ack_scheduled(sk)) { | ||
245 | if (!icsk->icsk_ack.pingpong) { | ||
246 | /* Delayed ACK missed: inflate ATO. */ | ||
247 | icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, | ||
248 | icsk->icsk_rto); | ||
249 | } else { | ||
250 | /* Delayed ACK missed: leave pingpong mode and | ||
251 | * deflate ATO. | ||
252 | */ | ||
253 | icsk->icsk_ack.pingpong = 0; | ||
254 | icsk->icsk_ack.ato = TCP_ATO_MIN; | ||
255 | } | ||
256 | dccp_send_ack(sk); | ||
257 | NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS); | ||
258 | } | ||
259 | out: | ||
260 | bh_unlock_sock(sk); | ||
261 | sock_put(sk); | ||
262 | } | ||
263 | |||
264 | void dccp_init_xmit_timers(struct sock *sk) | ||
265 | { | ||
266 | inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer, | ||
267 | &dccp_keepalive_timer); | ||
268 | } | ||
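[Editor's note] This final hunk moves dccp_delack_timer() and dccp_init_xmit_timers() to the end of the file, after the timer callbacks they reference, which is what allows the static forward declarations deleted at the top of timer.c to go away. A trivial sketch of the same ordering pattern, with hypothetical names:

/* Sketch of the reordering pattern: defining the callback before the
 * init helper removes the need for a forward declaration. Names are
 * illustrative, not the kernel's. */
#include <stdio.h>

static void demo_timer_cb(unsigned long data)
{
	printf("timer fired, data=%lu\n", data);
}

static void demo_init_timers(void (*cb)(unsigned long))
{
	cb(0);	/* stand-in for registering the callback */
}

int main(void)
{
	/* demo_timer_cb is defined above, so no forward declaration needed */
	demo_init_timers(demo_timer_cb);
	return 0;
}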