Diffstat (limited to 'net/dccp')
-rw-r--r--  net/dccp/Kconfig         |   13
-rw-r--r--  net/dccp/Makefile        |    9
-rw-r--r--  net/dccp/ackvec.c        |  296
-rw-r--r--  net/dccp/ackvec.h        |   53
-rw-r--r--  net/dccp/ccid.c          |  189
-rw-r--r--  net/dccp/ccid.h          |  129
-rw-r--r--  net/dccp/ccids/Kconfig   |   43
-rw-r--r--  net/dccp/ccids/Makefile  |    4
-rw-r--r--  net/dccp/ccids/ccid2.c   |  779
-rw-r--r--  net/dccp/ccids/ccid2.h   |   85
-rw-r--r--  net/dccp/ccids/ccid3.c   |  112
-rw-r--r--  net/dccp/ccids/ccid3.h   |    5
-rw-r--r--  net/dccp/dccp.h          |  133
-rw-r--r--  net/dccp/diag.c          |    2
-rw-r--r--  net/dccp/feat.c          |  586
-rw-r--r--  net/dccp/feat.h          |   29
-rw-r--r--  net/dccp/input.c         |   28
-rw-r--r--  net/dccp/ipv4.c          |  333
-rw-r--r--  net/dccp/ipv6.c          |  371
-rw-r--r--  net/dccp/minisocks.c     |   37
-rw-r--r--  net/dccp/options.c       |  291
-rw-r--r--  net/dccp/output.c        |   88
-rw-r--r--  net/dccp/proto.c         |  440
-rw-r--r--  net/dccp/sysctl.c        |  124
-rw-r--r--  net/dccp/timer.c         |   14
25 files changed, 3154 insertions(+), 1039 deletions(-)
diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig
index 187ac182e24b..7e096ba8454f 100644
--- a/net/dccp/Kconfig
+++ b/net/dccp/Kconfig
@@ -24,6 +24,10 @@ config INET_DCCP_DIAG
24 def_tristate y if (IP_DCCP = y && INET_DIAG = y) 24 def_tristate y if (IP_DCCP = y && INET_DIAG = y)
25 def_tristate m 25 def_tristate m
26 26
27config IP_DCCP_ACKVEC
28 depends on IP_DCCP
29 def_bool N
30
27source "net/dccp/ccids/Kconfig" 31source "net/dccp/ccids/Kconfig"
28 32
29menu "DCCP Kernel Hacking" 33menu "DCCP Kernel Hacking"
@@ -36,15 +40,6 @@ config IP_DCCP_DEBUG
36 40
37 Just say N. 41 Just say N.
38 42
39config IP_DCCP_UNLOAD_HACK
40 depends on IP_DCCP=m && IP_DCCP_CCID3=m
41 bool "DCCP control sock unload hack"
42 ---help---
43 Enable this to be able to unload the dccp module when it
44 has only one refcount held, the control sock one. Just execute
45 "rmmod dccp_ccid3 dccp"
46
47 Just say N.
48endmenu 43endmenu
49 44
50endmenu 45endmenu
diff --git a/net/dccp/Makefile b/net/dccp/Makefile
index 87b27fff6e3b..7696e219b05d 100644
--- a/net/dccp/Makefile
+++ b/net/dccp/Makefile
@@ -2,15 +2,18 @@ obj-$(CONFIG_IPV6) += dccp_ipv6.o
2 2
3dccp_ipv6-y := ipv6.o 3dccp_ipv6-y := ipv6.o
4 4
5obj-$(CONFIG_IP_DCCP) += dccp.o 5obj-$(CONFIG_IP_DCCP) += dccp.o dccp_ipv4.o
6 6
7dccp-y := ccid.o input.o ipv4.o minisocks.o options.o output.o proto.o \ 7dccp-y := ccid.o feat.o input.o minisocks.o options.o output.o proto.o timer.o
8 timer.o 8
9dccp_ipv4-y := ipv4.o
9 10
10dccp-$(CONFIG_IP_DCCP_ACKVEC) += ackvec.o 11dccp-$(CONFIG_IP_DCCP_ACKVEC) += ackvec.o
11 12
12obj-$(CONFIG_INET_DCCP_DIAG) += dccp_diag.o 13obj-$(CONFIG_INET_DCCP_DIAG) += dccp_diag.o
13 14
15dccp-$(CONFIG_SYSCTL) += sysctl.o
16
14dccp_diag-y := diag.o 17dccp_diag-y := diag.o
15 18
16obj-y += ccids/ 19obj-y += ccids/
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index 2c77dafbd091..b5981e5f6b00 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -13,36 +13,83 @@
13#include "dccp.h" 13#include "dccp.h"
14 14
15#include <linux/dccp.h> 15#include <linux/dccp.h>
16#include <linux/init.h>
17#include <linux/errno.h>
18#include <linux/kernel.h>
16#include <linux/skbuff.h> 19#include <linux/skbuff.h>
20#include <linux/slab.h>
17 21
18#include <net/sock.h> 22#include <net/sock.h>
19 23
24static kmem_cache_t *dccp_ackvec_slab;
25static kmem_cache_t *dccp_ackvec_record_slab;
26
27static struct dccp_ackvec_record *dccp_ackvec_record_new(void)
28{
29 struct dccp_ackvec_record *avr =
30 kmem_cache_alloc(dccp_ackvec_record_slab, GFP_ATOMIC);
31
32 if (avr != NULL)
33 INIT_LIST_HEAD(&avr->dccpavr_node);
34
35 return avr;
36}
37
38static void dccp_ackvec_record_delete(struct dccp_ackvec_record *avr)
39{
40 if (unlikely(avr == NULL))
41 return;
42 /* Check if deleting a linked record */
43 WARN_ON(!list_empty(&avr->dccpavr_node));
44 kmem_cache_free(dccp_ackvec_record_slab, avr);
45}
46
47static void dccp_ackvec_insert_avr(struct dccp_ackvec *av,
48 struct dccp_ackvec_record *avr)
49{
50 /*
51 * AVRs are sorted by seqno. Since we are sending them in order, we
52 * just add the AVR at the head of the list.
53 * -sorbo.
54 */
55 if (!list_empty(&av->dccpav_records)) {
56 const struct dccp_ackvec_record *head =
57 list_entry(av->dccpav_records.next,
58 struct dccp_ackvec_record,
59 dccpavr_node);
60 BUG_ON(before48(avr->dccpavr_ack_seqno,
61 head->dccpavr_ack_seqno));
62 }
63
64 list_add(&avr->dccpavr_node, &av->dccpav_records);
65}
66
20int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb) 67int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb)
21{ 68{
22 struct dccp_sock *dp = dccp_sk(sk); 69 struct dccp_sock *dp = dccp_sk(sk);
70#ifdef CONFIG_IP_DCCP_DEBUG
71 const char *debug_prefix = dp->dccps_role == DCCP_ROLE_CLIENT ?
72 "CLIENT tx: " : "server tx: ";
73#endif
23 struct dccp_ackvec *av = dp->dccps_hc_rx_ackvec; 74 struct dccp_ackvec *av = dp->dccps_hc_rx_ackvec;
24 int len = av->dccpav_vec_len + 2; 75 int len = av->dccpav_vec_len + 2;
25 struct timeval now; 76 struct timeval now;
26 u32 elapsed_time; 77 u32 elapsed_time;
27 unsigned char *to, *from; 78 unsigned char *to, *from;
79 struct dccp_ackvec_record *avr;
80
81 if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN)
82 return -1;
28 83
29 dccp_timestamp(sk, &now); 84 dccp_timestamp(sk, &now);
30 elapsed_time = timeval_delta(&now, &av->dccpav_time) / 10; 85 elapsed_time = timeval_delta(&now, &av->dccpav_time) / 10;
31 86
32 if (elapsed_time != 0) 87 if (elapsed_time != 0 &&
33 dccp_insert_option_elapsed_time(sk, skb, elapsed_time); 88 dccp_insert_option_elapsed_time(sk, skb, elapsed_time))
34
35 if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN)
36 return -1; 89 return -1;
37 90
38 /* 91 avr = dccp_ackvec_record_new();
39 * XXX: now we have just one ack vector sent record, so 92 if (avr == NULL)
40 * we have to wait for it to be cleared.
41 *
42 * Of course this is not acceptable, but this is just for
43 * basic testing now.
44 */
45 if (av->dccpav_ack_seqno != DCCP_MAX_SEQNO + 1)
46 return -1; 93 return -1;
47 94
48 DCCP_SKB_CB(skb)->dccpd_opt_len += len; 95 DCCP_SKB_CB(skb)->dccpd_opt_len += len;
@@ -55,8 +102,8 @@ int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb)
55 from = av->dccpav_buf + av->dccpav_buf_head; 102 from = av->dccpav_buf + av->dccpav_buf_head;
56 103
57 /* Check if buf_head wraps */ 104 /* Check if buf_head wraps */
58 if ((int)av->dccpav_buf_head + len > av->dccpav_vec_len) { 105 if ((int)av->dccpav_buf_head + len > DCCP_MAX_ACKVEC_LEN) {
59 const u32 tailsize = av->dccpav_vec_len - av->dccpav_buf_head; 106 const u32 tailsize = DCCP_MAX_ACKVEC_LEN - av->dccpav_buf_head;
60 107
61 memcpy(to, from, tailsize); 108 memcpy(to, from, tailsize);
62 to += tailsize; 109 to += tailsize;
@@ -73,45 +120,37 @@ int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb)
73 * sequence number it used for the ack packet; ack_ptr will equal 120 * sequence number it used for the ack packet; ack_ptr will equal
74 * buf_head; ack_ackno will equal buf_ackno; and ack_nonce will 121 * buf_head; ack_ackno will equal buf_ackno; and ack_nonce will
75 * equal buf_nonce. 122 * equal buf_nonce.
76 *
77 * This implemention uses just one ack record for now.
78 */ 123 */
79 av->dccpav_ack_seqno = DCCP_SKB_CB(skb)->dccpd_seq; 124 avr->dccpavr_ack_seqno = DCCP_SKB_CB(skb)->dccpd_seq;
80 av->dccpav_ack_ptr = av->dccpav_buf_head; 125 avr->dccpavr_ack_ptr = av->dccpav_buf_head;
81 av->dccpav_ack_ackno = av->dccpav_buf_ackno; 126 avr->dccpavr_ack_ackno = av->dccpav_buf_ackno;
82 av->dccpav_ack_nonce = av->dccpav_buf_nonce; 127 avr->dccpavr_ack_nonce = av->dccpav_buf_nonce;
83 av->dccpav_sent_len = av->dccpav_vec_len; 128 avr->dccpavr_sent_len = av->dccpav_vec_len;
129
130 dccp_ackvec_insert_avr(av, avr);
84 131
85 dccp_pr_debug("%sACK Vector 0, len=%d, ack_seqno=%llu, " 132 dccp_pr_debug("%sACK Vector 0, len=%d, ack_seqno=%llu, "
86 "ack_ackno=%llu\n", 133 "ack_ackno=%llu\n",
87 debug_prefix, av->dccpav_sent_len, 134 debug_prefix, avr->dccpavr_sent_len,
88 (unsigned long long)av->dccpav_ack_seqno, 135 (unsigned long long)avr->dccpavr_ack_seqno,
89 (unsigned long long)av->dccpav_ack_ackno); 136 (unsigned long long)avr->dccpavr_ack_ackno);
90 return -1; 137 return 0;
91} 138}
92 139
93struct dccp_ackvec *dccp_ackvec_alloc(const unsigned int len, 140struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority)
94 const gfp_t priority)
95{ 141{
96 struct dccp_ackvec *av; 142 struct dccp_ackvec *av = kmem_cache_alloc(dccp_ackvec_slab, priority);
97
98 BUG_ON(len == 0);
99 143
100 if (len > DCCP_MAX_ACKVEC_LEN)
101 return NULL;
102
103 av = kmalloc(sizeof(*av) + len, priority);
104 if (av != NULL) { 144 if (av != NULL) {
105 av->dccpav_buf_len = len;
106 av->dccpav_buf_head = 145 av->dccpav_buf_head =
107 av->dccpav_buf_tail = av->dccpav_buf_len - 1; 146 av->dccpav_buf_tail = DCCP_MAX_ACKVEC_LEN - 1;
108 av->dccpav_buf_ackno = 147 av->dccpav_buf_ackno = DCCP_MAX_SEQNO + 1;
109 av->dccpav_ack_ackno = av->dccpav_ack_seqno = ~0LLU;
110 av->dccpav_buf_nonce = av->dccpav_buf_nonce = 0; 148 av->dccpav_buf_nonce = av->dccpav_buf_nonce = 0;
111 av->dccpav_ack_ptr = 0; 149 av->dccpav_ack_ptr = 0;
112 av->dccpav_time.tv_sec = 0; 150 av->dccpav_time.tv_sec = 0;
113 av->dccpav_time.tv_usec = 0; 151 av->dccpav_time.tv_usec = 0;
114 av->dccpav_sent_len = av->dccpav_vec_len = 0; 152 av->dccpav_sent_len = av->dccpav_vec_len = 0;
153 INIT_LIST_HEAD(&av->dccpav_records);
115 } 154 }
116 155
117 return av; 156 return av;
@@ -119,7 +158,20 @@ struct dccp_ackvec *dccp_ackvec_alloc(const unsigned int len,
119 158
120void dccp_ackvec_free(struct dccp_ackvec *av) 159void dccp_ackvec_free(struct dccp_ackvec *av)
121{ 160{
122 kfree(av); 161 if (unlikely(av == NULL))
162 return;
163
164 if (!list_empty(&av->dccpav_records)) {
165 struct dccp_ackvec_record *avr, *next;
166
167 list_for_each_entry_safe(avr, next, &av->dccpav_records,
168 dccpavr_node) {
169 list_del_init(&avr->dccpavr_node);
170 dccp_ackvec_record_delete(avr);
171 }
172 }
173
174 kmem_cache_free(dccp_ackvec_slab, av);
123} 175}
124 176
125static inline u8 dccp_ackvec_state(const struct dccp_ackvec *av, 177static inline u8 dccp_ackvec_state(const struct dccp_ackvec *av,
@@ -146,7 +198,7 @@ static inline int dccp_ackvec_set_buf_head_state(struct dccp_ackvec *av,
146 unsigned int gap; 198 unsigned int gap;
147 long new_head; 199 long new_head;
148 200
149 if (av->dccpav_vec_len + packets > av->dccpav_buf_len) 201 if (av->dccpav_vec_len + packets > DCCP_MAX_ACKVEC_LEN)
150 return -ENOBUFS; 202 return -ENOBUFS;
151 203
152 gap = packets - 1; 204 gap = packets - 1;
@@ -158,7 +210,7 @@ static inline int dccp_ackvec_set_buf_head_state(struct dccp_ackvec *av,
158 gap + new_head + 1); 210 gap + new_head + 1);
159 gap = -new_head; 211 gap = -new_head;
160 } 212 }
161 new_head += av->dccpav_buf_len; 213 new_head += DCCP_MAX_ACKVEC_LEN;
162 } 214 }
163 215
164 av->dccpav_buf_head = new_head; 216 av->dccpav_buf_head = new_head;
@@ -251,7 +303,7 @@ int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk,
251 goto out_duplicate; 303 goto out_duplicate;
252 304
253 delta -= len + 1; 305 delta -= len + 1;
254 if (++index == av->dccpav_buf_len) 306 if (++index == DCCP_MAX_ACKVEC_LEN)
255 index = 0; 307 index = 0;
256 } 308 }
257 } 309 }
@@ -259,7 +311,6 @@ int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk,
259 av->dccpav_buf_ackno = ackno; 311 av->dccpav_buf_ackno = ackno;
260 dccp_timestamp(sk, &av->dccpav_time); 312 dccp_timestamp(sk, &av->dccpav_time);
261out: 313out:
262 dccp_pr_debug("");
263 return 0; 314 return 0;
264 315
265out_duplicate: 316out_duplicate:
@@ -297,44 +348,50 @@ void dccp_ackvec_print(const struct dccp_ackvec *av)
297} 348}
298#endif 349#endif
299 350
300static void dccp_ackvec_throw_away_ack_record(struct dccp_ackvec *av) 351static void dccp_ackvec_throw_record(struct dccp_ackvec *av,
352 struct dccp_ackvec_record *avr)
301{ 353{
302 /* 354 struct dccp_ackvec_record *next;
303 * As we're keeping track of the ack vector size (dccpav_vec_len) and 355
304 * the sent ack vector size (dccpav_sent_len) we don't need 356 av->dccpav_buf_tail = avr->dccpavr_ack_ptr - 1;
305 * dccpav_buf_tail at all, but keep this code here as in the future 357 if (av->dccpav_buf_tail == 0)
306 * we'll implement a vector of ack records, as suggested in 358 av->dccpav_buf_tail = DCCP_MAX_ACKVEC_LEN - 1;
307 * draft-ietf-dccp-spec-11.txt Appendix A. -acme 359
308 */ 360 av->dccpav_vec_len -= avr->dccpavr_sent_len;
309#if 0 361
310 u32 new_buf_tail = av->dccpav_ack_ptr + 1; 362 /* free records */
311 if (new_buf_tail >= av->dccpav_vec_len) 363 list_for_each_entry_safe_from(avr, next, &av->dccpav_records,
312 new_buf_tail -= av->dccpav_vec_len; 364 dccpavr_node) {
313 av->dccpav_buf_tail = new_buf_tail; 365 list_del_init(&avr->dccpavr_node);
314#endif 366 dccp_ackvec_record_delete(avr);
315 av->dccpav_vec_len -= av->dccpav_sent_len; 367 }
316} 368}
317 369
318void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av, struct sock *sk, 370void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av, struct sock *sk,
319 const u64 ackno) 371 const u64 ackno)
320{ 372{
321 /* Check if we actually sent an ACK vector */ 373 struct dccp_ackvec_record *avr;
322 if (av->dccpav_ack_seqno == DCCP_MAX_SEQNO + 1)
323 return;
324 374
325 if (ackno == av->dccpav_ack_seqno) { 375 /*
376 * If we traverse backwards, it should be faster when we have large
377 * windows. We will be receiving ACKs for stuff we sent a while back
378 * -sorbo.
379 */
380 list_for_each_entry_reverse(avr, &av->dccpav_records, dccpavr_node) {
381 if (ackno == avr->dccpavr_ack_seqno) {
326#ifdef CONFIG_IP_DCCP_DEBUG 382#ifdef CONFIG_IP_DCCP_DEBUG
327 struct dccp_sock *dp = dccp_sk(sk); 383 struct dccp_sock *dp = dccp_sk(sk);
328 const char *debug_prefix = dp->dccps_role == DCCP_ROLE_CLIENT ? 384 const char *debug_prefix = dp->dccps_role == DCCP_ROLE_CLIENT ?
329 "CLIENT rx ack: " : "server rx ack: "; 385 "CLIENT rx ack: " : "server rx ack: ";
330#endif 386#endif
331 dccp_pr_debug("%sACK packet 0, len=%d, ack_seqno=%llu, " 387 dccp_pr_debug("%sACK packet 0, len=%d, ack_seqno=%llu, "
332 "ack_ackno=%llu, ACKED!\n", 388 "ack_ackno=%llu, ACKED!\n",
333 debug_prefix, 1, 389 debug_prefix, 1,
334 (unsigned long long)av->dccpav_ack_seqno, 390 (unsigned long long)avr->dccpavr_ack_seqno,
335 (unsigned long long)av->dccpav_ack_ackno); 391 (unsigned long long)avr->dccpavr_ack_ackno);
336 dccp_ackvec_throw_away_ack_record(av); 392 dccp_ackvec_throw_record(av, avr);
337 av->dccpav_ack_seqno = DCCP_MAX_SEQNO + 1; 393 break;
394 }
338 } 395 }
339} 396}
340 397
@@ -344,28 +401,20 @@ static void dccp_ackvec_check_rcv_ackvector(struct dccp_ackvec *av,
344 const unsigned char *vector) 401 const unsigned char *vector)
345{ 402{
346 unsigned char i; 403 unsigned char i;
404 struct dccp_ackvec_record *avr;
347 405
348 /* Check if we actually sent an ACK vector */ 406 /* Check if we actually sent an ACK vector */
349 if (av->dccpav_ack_seqno == DCCP_MAX_SEQNO + 1) 407 if (list_empty(&av->dccpav_records))
350 return;
351 /*
352 * We're in the receiver half connection, so if the received an ACK
353 * vector ackno (e.g. 50) before dccpav_ack_seqno (e.g. 52), we're
354 * not interested.
355 *
356 * Extra explanation with example:
357 *
358 * if we received an ACK vector with ackno 50, it can only be acking
359 * 50, 49, 48, etc, not 52 (the seqno for the ACK vector we sent).
360 */
361 /* dccp_pr_debug("is %llu < %llu? ", ackno, av->dccpav_ack_seqno); */
362 if (before48(ackno, av->dccpav_ack_seqno)) {
363 /* dccp_pr_debug_cat("yes\n"); */
364 return; 408 return;
365 }
366 /* dccp_pr_debug_cat("no\n"); */
367 409
368 i = len; 410 i = len;
411 /*
412 * XXX
413 * I think it might be more efficient to work backwards. See comment on
414 * rcv_ackno. -sorbo.
415 */
416 avr = list_entry(av->dccpav_records.next, struct dccp_ackvec_record,
417 dccpavr_node);
369 while (i--) { 418 while (i--) {
370 const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK; 419 const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK;
371 u64 ackno_end_rl; 420 u64 ackno_end_rl;
@@ -373,14 +422,20 @@ static void dccp_ackvec_check_rcv_ackvector(struct dccp_ackvec *av,
373 dccp_set_seqno(&ackno_end_rl, ackno - rl); 422 dccp_set_seqno(&ackno_end_rl, ackno - rl);
374 423
375 /* 424 /*
376 * dccp_pr_debug("is %llu <= %llu <= %llu? ", ackno_end_rl, 425 * If our AVR sequence number is greater than the ack, go
377 * av->dccpav_ack_seqno, ackno); 426 * forward in the AVR list until it is not so.
378 */ 427 */
379 if (between48(av->dccpav_ack_seqno, ackno_end_rl, ackno)) { 428 list_for_each_entry_from(avr, &av->dccpav_records,
429 dccpavr_node) {
430 if (!after48(avr->dccpavr_ack_seqno, ackno))
431 goto found;
432 }
433 /* End of the dccpav_records list, not found, exit */
434 break;
435found:
436 if (between48(avr->dccpavr_ack_seqno, ackno_end_rl, ackno)) {
380 const u8 state = (*vector & 437 const u8 state = (*vector &
381 DCCP_ACKVEC_STATE_MASK) >> 6; 438 DCCP_ACKVEC_STATE_MASK) >> 6;
382 /* dccp_pr_debug_cat("yes\n"); */
383
384 if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED) { 439 if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED) {
385#ifdef CONFIG_IP_DCCP_DEBUG 440#ifdef CONFIG_IP_DCCP_DEBUG
386 struct dccp_sock *dp = dccp_sk(sk); 441 struct dccp_sock *dp = dccp_sk(sk);
@@ -393,19 +448,16 @@ static void dccp_ackvec_check_rcv_ackvector(struct dccp_ackvec *av,
393 "ACKED!\n", 448 "ACKED!\n",
394 debug_prefix, len, 449 debug_prefix, len,
395 (unsigned long long) 450 (unsigned long long)
396 av->dccpav_ack_seqno, 451 avr->dccpavr_ack_seqno,
397 (unsigned long long) 452 (unsigned long long)
398 av->dccpav_ack_ackno); 453 avr->dccpavr_ack_ackno);
399 dccp_ackvec_throw_away_ack_record(av); 454 dccp_ackvec_throw_record(av, avr);
400 } 455 }
401 /* 456 /*
402 * If dccpav_ack_seqno was not received, no problem 457 * If it wasn't received, continue scanning... we might
403 * we'll send another ACK vector. 458 * find another one.
404 */ 459 */
405 av->dccpav_ack_seqno = DCCP_MAX_SEQNO + 1;
406 break;
407 } 460 }
408 /* dccp_pr_debug_cat("no\n"); */
409 461
410 dccp_set_seqno(&ackno, ackno_end_rl - 1); 462 dccp_set_seqno(&ackno, ackno_end_rl - 1);
411 ++vector; 463 ++vector;
@@ -424,3 +476,43 @@ int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb,
424 len, value); 476 len, value);
425 return 0; 477 return 0;
426} 478}
479
480static char dccp_ackvec_slab_msg[] __initdata =
481 KERN_CRIT "DCCP: Unable to create ack vectors slab caches\n";
482
483int __init dccp_ackvec_init(void)
484{
485 dccp_ackvec_slab = kmem_cache_create("dccp_ackvec",
486 sizeof(struct dccp_ackvec), 0,
487 SLAB_HWCACHE_ALIGN, NULL, NULL);
488 if (dccp_ackvec_slab == NULL)
489 goto out_err;
490
491 dccp_ackvec_record_slab =
492 kmem_cache_create("dccp_ackvec_record",
493 sizeof(struct dccp_ackvec_record),
494 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
495 if (dccp_ackvec_record_slab == NULL)
496 goto out_destroy_slab;
497
498 return 0;
499
500out_destroy_slab:
501 kmem_cache_destroy(dccp_ackvec_slab);
502 dccp_ackvec_slab = NULL;
503out_err:
504 printk(dccp_ackvec_slab_msg);
505 return -ENOBUFS;
506}
507
508void dccp_ackvec_exit(void)
509{
510 if (dccp_ackvec_slab != NULL) {
511 kmem_cache_destroy(dccp_ackvec_slab);
512 dccp_ackvec_slab = NULL;
513 }
514 if (dccp_ackvec_record_slab != NULL) {
515 kmem_cache_destroy(dccp_ackvec_record_slab);
516 dccp_ackvec_record_slab = NULL;
517 }
518}
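
The ackvec.c changes above replace the single ack-vector "sent record" with a list of struct dccp_ackvec_record: new records are inserted at the head (they are generated in increasing seqno order), and once the peer acknowledges the packet a record was sent on, dccp_ackvec_throw_record() drops that record together with every older one. The following is a minimal userspace sketch of that bookkeeping; struct avr, avr_insert and avr_ack are illustrative names, not part of the patch.

/*
 * Minimal userspace model of the ack-vector record list: records are kept
 * newest-first, and acknowledging one of them frees it together with every
 * older record.  struct avr, avr_insert and avr_ack are illustrative names.
 */
#include <stdio.h>
#include <stdlib.h>

struct avr {
	struct avr *next;		/* next-older record */
	unsigned long long seqno;	/* seqno of the packet that carried the vector */
};

static struct avr *avr_insert(struct avr *head, unsigned long long seqno)
{
	struct avr *avr = malloc(sizeof(*avr));

	if (avr == NULL)
		return head;
	/* records are generated in increasing seqno order: the new one goes first */
	avr->seqno = seqno;
	avr->next = head;
	return avr;
}

/* The peer acked @seqno: drop the matching record and everything older. */
static struct avr *avr_ack(struct avr *head, unsigned long long seqno)
{
	struct avr **pprev = &head, *avr = head;

	while (avr != NULL && avr->seqno != seqno) {
		pprev = &avr->next;
		avr = avr->next;
	}
	if (avr == NULL)
		return head;		/* no record matched this ack */

	*pprev = NULL;			/* detach the matched record and its tail */
	while (avr != NULL) {
		struct avr *next = avr->next;

		free(avr);
		avr = next;
	}
	return head;
}

int main(void)
{
	struct avr *list = NULL, *avr;
	unsigned long long s;

	for (s = 100; s <= 104; s++)
		list = avr_insert(list, s);

	list = avr_ack(list, 102);	/* frees 102, 101 and 100 */

	for (avr = list; avr != NULL; avr = avr->next)
		printf("still pending: %llu\n", avr->seqno);	/* 104, 103 */
	return 0;
}

The real list keeps the same newest-first order, and the patch's comment in dccp_ackvec_check_rcv_ackno() notes that searching from the oldest end is usually quicker, since incoming acknowledgements refer to packets sent a while back.
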
diff --git a/net/dccp/ackvec.h b/net/dccp/ackvec.h
index f7dfb5f67b87..ec7a89bb7b39 100644
--- a/net/dccp/ackvec.h
+++ b/net/dccp/ackvec.h
@@ -13,6 +13,7 @@
13 13
14#include <linux/config.h> 14#include <linux/config.h>
15#include <linux/compiler.h> 15#include <linux/compiler.h>
16#include <linux/list.h>
16#include <linux/time.h> 17#include <linux/time.h>
17#include <linux/types.h> 18#include <linux/types.h>
18 19
@@ -42,39 +43,57 @@
42 * Ack Vectors it has recently sent. For each packet sent carrying an 43 * Ack Vectors it has recently sent. For each packet sent carrying an
43 * Ack Vector, it remembers four variables: 44 * Ack Vector, it remembers four variables:
44 * 45 *
45 * @dccpav_ack_seqno - the Sequence Number used for the packet
46 * (HC-Receiver seqno)
47 * @dccpav_ack_ptr - the value of buf_head at the time of acknowledgement. 46 * @dccpav_ack_ptr - the value of buf_head at the time of acknowledgement.
48 * @dccpav_ack_ackno - the Acknowledgement Number used for the packet 47 * @dccpav_records - list of dccp_ackvec_record
49 * (HC-Sender seqno)
50 * @dccpav_ack_nonce - the one-bit sum of the ECN Nonces for all State 0. 48 * @dccpav_ack_nonce - the one-bit sum of the ECN Nonces for all State 0.
51 * 49 *
52 * @dccpav_buf_len - circular buffer length
53 * @dccpav_time - the time in usecs 50 * @dccpav_time - the time in usecs
54 * @dccpav_buf - circular buffer of acknowledgeable packets 51 * @dccpav_buf - circular buffer of acknowledgeable packets
55 */ 52 */
56struct dccp_ackvec { 53struct dccp_ackvec {
57 u64 dccpav_buf_ackno; 54 u64 dccpav_buf_ackno;
58 u64 dccpav_ack_seqno; 55 struct list_head dccpav_records;
59 u64 dccpav_ack_ackno;
60 struct timeval dccpav_time; 56 struct timeval dccpav_time;
61 u8 dccpav_buf_head; 57 u8 dccpav_buf_head;
62 u8 dccpav_buf_tail; 58 u8 dccpav_buf_tail;
63 u8 dccpav_ack_ptr; 59 u8 dccpav_ack_ptr;
64 u8 dccpav_sent_len; 60 u8 dccpav_sent_len;
65 u8 dccpav_vec_len; 61 u8 dccpav_vec_len;
66 u8 dccpav_buf_len;
67 u8 dccpav_buf_nonce; 62 u8 dccpav_buf_nonce;
68 u8 dccpav_ack_nonce; 63 u8 dccpav_ack_nonce;
69 u8 dccpav_buf[0]; 64 u8 dccpav_buf[DCCP_MAX_ACKVEC_LEN];
65};
66
67/** struct dccp_ackvec_record - ack vector record
68 *
69 * ACK vector record as defined in Appendix A of spec.
70 *
71 * The list is sorted by dccpavr_ack_seqno
72 *
73 * @dccpavr_node - node in dccpav_records
74 * @dccpavr_ack_seqno - sequence number of the packet this record was sent on
75 * @dccpavr_ack_ackno - sequence number being acknowledged
76 * @dccpavr_ack_ptr - pointer into dccpav_buf where this record starts
77 * @dccpavr_ack_nonce - dccpav_ack_nonce at the time this record was sent
78 * @dccpavr_sent_len - length of the record in dccpav_buf
79 */
80struct dccp_ackvec_record {
81 struct list_head dccpavr_node;
82 u64 dccpavr_ack_seqno;
83 u64 dccpavr_ack_ackno;
84 u8 dccpavr_ack_ptr;
85 u8 dccpavr_ack_nonce;
86 u8 dccpavr_sent_len;
70}; 87};
71 88
72struct sock; 89struct sock;
73struct sk_buff; 90struct sk_buff;
74 91
75#ifdef CONFIG_IP_DCCP_ACKVEC 92#ifdef CONFIG_IP_DCCP_ACKVEC
76extern struct dccp_ackvec *dccp_ackvec_alloc(unsigned int len, 93extern int dccp_ackvec_init(void);
77 const gfp_t priority); 94extern void dccp_ackvec_exit(void);
95
96extern struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority);
78extern void dccp_ackvec_free(struct dccp_ackvec *av); 97extern void dccp_ackvec_free(struct dccp_ackvec *av);
79 98
80extern int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk, 99extern int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk,
@@ -92,8 +111,16 @@ static inline int dccp_ackvec_pending(const struct dccp_ackvec *av)
92 return av->dccpav_sent_len != av->dccpav_vec_len; 111 return av->dccpav_sent_len != av->dccpav_vec_len;
93} 112}
94#else /* CONFIG_IP_DCCP_ACKVEC */ 113#else /* CONFIG_IP_DCCP_ACKVEC */
95static inline struct dccp_ackvec *dccp_ackvec_alloc(unsigned int len, 114static inline int dccp_ackvec_init(void)
96 const gfp_t priority) 115{
116 return 0;
117}
118
119static inline void dccp_ackvec_exit(void)
120{
121}
122
123static inline struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority)
97{ 124{
98 return NULL; 125 return NULL;
99} 126}
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index 9d8fc0e289ea..ff05e59043cd 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -13,7 +13,7 @@
13 13
14#include "ccid.h" 14#include "ccid.h"
15 15
16static struct ccid *ccids[CCID_MAX]; 16static struct ccid_operations *ccids[CCID_MAX];
17#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) 17#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
18static atomic_t ccids_lockct = ATOMIC_INIT(0); 18static atomic_t ccids_lockct = ATOMIC_INIT(0);
19static DEFINE_SPINLOCK(ccids_lock); 19static DEFINE_SPINLOCK(ccids_lock);
@@ -55,85 +55,202 @@ static inline void ccids_read_unlock(void)
55#define ccids_read_unlock() do { } while(0) 55#define ccids_read_unlock() do { } while(0)
56#endif 56#endif
57 57
58int ccid_register(struct ccid *ccid) 58static kmem_cache_t *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
59{ 59{
60 int err; 60 kmem_cache_t *slab;
61 char slab_name_fmt[32], *slab_name;
62 va_list args;
61 63
62 if (ccid->ccid_init == NULL) 64 va_start(args, fmt);
63 return -1; 65 vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args);
66 va_end(args);
67
68 slab_name = kstrdup(slab_name_fmt, GFP_KERNEL);
69 if (slab_name == NULL)
70 return NULL;
71 slab = kmem_cache_create(slab_name, sizeof(struct ccid) + obj_size, 0,
72 SLAB_HWCACHE_ALIGN, NULL, NULL);
73 if (slab == NULL)
74 kfree(slab_name);
75 return slab;
76}
77
78static void ccid_kmem_cache_destroy(kmem_cache_t *slab)
79{
80 if (slab != NULL) {
81 const char *name = kmem_cache_name(slab);
82
83 kmem_cache_destroy(slab);
84 kfree(name);
85 }
86}
87
88int ccid_register(struct ccid_operations *ccid_ops)
89{
90 int err = -ENOBUFS;
91
92 ccid_ops->ccid_hc_rx_slab =
93 ccid_kmem_cache_create(ccid_ops->ccid_hc_rx_obj_size,
94 "%s_hc_rx_sock",
95 ccid_ops->ccid_name);
96 if (ccid_ops->ccid_hc_rx_slab == NULL)
97 goto out;
98
99 ccid_ops->ccid_hc_tx_slab =
100 ccid_kmem_cache_create(ccid_ops->ccid_hc_tx_obj_size,
101 "%s_hc_tx_sock",
102 ccid_ops->ccid_name);
103 if (ccid_ops->ccid_hc_tx_slab == NULL)
104 goto out_free_rx_slab;
64 105
65 ccids_write_lock(); 106 ccids_write_lock();
66 err = -EEXIST; 107 err = -EEXIST;
67 if (ccids[ccid->ccid_id] == NULL) { 108 if (ccids[ccid_ops->ccid_id] == NULL) {
68 ccids[ccid->ccid_id] = ccid; 109 ccids[ccid_ops->ccid_id] = ccid_ops;
69 err = 0; 110 err = 0;
70 } 111 }
71 ccids_write_unlock(); 112 ccids_write_unlock();
72 if (err == 0) 113 if (err != 0)
73 pr_info("CCID: Registered CCID %d (%s)\n", 114 goto out_free_tx_slab;
74 ccid->ccid_id, ccid->ccid_name); 115
116 pr_info("CCID: Registered CCID %d (%s)\n",
117 ccid_ops->ccid_id, ccid_ops->ccid_name);
118out:
75 return err; 119 return err;
120out_free_tx_slab:
121 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab);
122 ccid_ops->ccid_hc_tx_slab = NULL;
123 goto out;
124out_free_rx_slab:
125 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
126 ccid_ops->ccid_hc_rx_slab = NULL;
127 goto out;
76} 128}
77 129
78EXPORT_SYMBOL_GPL(ccid_register); 130EXPORT_SYMBOL_GPL(ccid_register);
79 131
80int ccid_unregister(struct ccid *ccid) 132int ccid_unregister(struct ccid_operations *ccid_ops)
81{ 133{
82 ccids_write_lock(); 134 ccids_write_lock();
83 ccids[ccid->ccid_id] = NULL; 135 ccids[ccid_ops->ccid_id] = NULL;
84 ccids_write_unlock(); 136 ccids_write_unlock();
137
138 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab);
139 ccid_ops->ccid_hc_tx_slab = NULL;
140 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
141 ccid_ops->ccid_hc_rx_slab = NULL;
142
85 pr_info("CCID: Unregistered CCID %d (%s)\n", 143 pr_info("CCID: Unregistered CCID %d (%s)\n",
86 ccid->ccid_id, ccid->ccid_name); 144 ccid_ops->ccid_id, ccid_ops->ccid_name);
87 return 0; 145 return 0;
88} 146}
89 147
90EXPORT_SYMBOL_GPL(ccid_unregister); 148EXPORT_SYMBOL_GPL(ccid_unregister);
91 149
92struct ccid *ccid_init(unsigned char id, struct sock *sk) 150struct ccid *ccid_new(unsigned char id, struct sock *sk, int rx, gfp_t gfp)
93{ 151{
94 struct ccid *ccid; 152 struct ccid_operations *ccid_ops;
153 struct ccid *ccid = NULL;
95 154
155 ccids_read_lock();
96#ifdef CONFIG_KMOD 156#ifdef CONFIG_KMOD
97 if (ccids[id] == NULL) 157 if (ccids[id] == NULL) {
158 /* We only try to load if in process context */
159 ccids_read_unlock();
160 if (gfp & GFP_ATOMIC)
161 goto out;
98 request_module("net-dccp-ccid-%d", id); 162 request_module("net-dccp-ccid-%d", id);
163 ccids_read_lock();
164 }
99#endif 165#endif
100 ccids_read_lock(); 166 ccid_ops = ccids[id];
167 if (ccid_ops == NULL)
168 goto out_unlock;
101 169
102 ccid = ccids[id]; 170 if (!try_module_get(ccid_ops->ccid_owner))
103 if (ccid == NULL) 171 goto out_unlock;
104 goto out;
105 172
106 if (!try_module_get(ccid->ccid_owner)) 173 ccids_read_unlock();
107 goto out_err;
108 174
109 if (ccid->ccid_init(sk) != 0) 175 ccid = kmem_cache_alloc(rx ? ccid_ops->ccid_hc_rx_slab :
176 ccid_ops->ccid_hc_tx_slab, gfp);
177 if (ccid == NULL)
110 goto out_module_put; 178 goto out_module_put;
179 ccid->ccid_ops = ccid_ops;
180 if (rx) {
181 memset(ccid + 1, 0, ccid_ops->ccid_hc_rx_obj_size);
182 if (ccid->ccid_ops->ccid_hc_rx_init != NULL &&
183 ccid->ccid_ops->ccid_hc_rx_init(ccid, sk) != 0)
184 goto out_free_ccid;
185 } else {
186 memset(ccid + 1, 0, ccid_ops->ccid_hc_tx_obj_size);
187 if (ccid->ccid_ops->ccid_hc_tx_init != NULL &&
188 ccid->ccid_ops->ccid_hc_tx_init(ccid, sk) != 0)
189 goto out_free_ccid;
190 }
111out: 191out:
112 ccids_read_unlock();
113 return ccid; 192 return ccid;
114out_module_put: 193out_unlock:
115 module_put(ccid->ccid_owner); 194 ccids_read_unlock();
116out_err: 195 goto out;
196out_free_ccid:
197 kmem_cache_free(rx ? ccid_ops->ccid_hc_rx_slab :
198 ccid_ops->ccid_hc_tx_slab, ccid);
117 ccid = NULL; 199 ccid = NULL;
200out_module_put:
201 module_put(ccid_ops->ccid_owner);
118 goto out; 202 goto out;
119} 203}
120 204
121EXPORT_SYMBOL_GPL(ccid_init); 205EXPORT_SYMBOL_GPL(ccid_new);
206
207struct ccid *ccid_hc_rx_new(unsigned char id, struct sock *sk, gfp_t gfp)
208{
209 return ccid_new(id, sk, 1, gfp);
210}
211
212EXPORT_SYMBOL_GPL(ccid_hc_rx_new);
213
214struct ccid *ccid_hc_tx_new(unsigned char id,struct sock *sk, gfp_t gfp)
215{
216 return ccid_new(id, sk, 0, gfp);
217}
218
219EXPORT_SYMBOL_GPL(ccid_hc_tx_new);
122 220
123void ccid_exit(struct ccid *ccid, struct sock *sk) 221static void ccid_delete(struct ccid *ccid, struct sock *sk, int rx)
124{ 222{
223 struct ccid_operations *ccid_ops;
224
125 if (ccid == NULL) 225 if (ccid == NULL)
126 return; 226 return;
127 227
228 ccid_ops = ccid->ccid_ops;
229 if (rx) {
230 if (ccid_ops->ccid_hc_rx_exit != NULL)
231 ccid_ops->ccid_hc_rx_exit(sk);
232 kmem_cache_free(ccid_ops->ccid_hc_rx_slab, ccid);
233 } else {
234 if (ccid_ops->ccid_hc_tx_exit != NULL)
235 ccid_ops->ccid_hc_tx_exit(sk);
236 kmem_cache_free(ccid_ops->ccid_hc_tx_slab, ccid);
237 }
128 ccids_read_lock(); 238 ccids_read_lock();
239 if (ccids[ccid_ops->ccid_id] != NULL)
240 module_put(ccid_ops->ccid_owner);
241 ccids_read_unlock();
242}
129 243
130 if (ccids[ccid->ccid_id] != NULL) { 244void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk)
131 if (ccid->ccid_exit != NULL) 245{
132 ccid->ccid_exit(sk); 246 ccid_delete(ccid, sk, 1);
133 module_put(ccid->ccid_owner); 247}
134 }
135 248
136 ccids_read_unlock(); 249EXPORT_SYMBOL_GPL(ccid_hc_rx_delete);
250
251void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk)
252{
253 ccid_delete(ccid, sk, 0);
137} 254}
138 255
139EXPORT_SYMBOL_GPL(ccid_exit); 256EXPORT_SYMBOL_GPL(ccid_hc_tx_delete);
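
ccid_register() now builds the per-CCID receiver and sender slab caches itself, sized from ccid_hc_rx_obj_size and ccid_hc_tx_obj_size, so a congestion-control module only has to describe itself in a struct ccid_operations. Below is a hedged sketch of what such a registration could look like; the "ccidex" names, the ID value and the init body are invented for illustration, and only the ccid_operations fields, ccid_priv() and ccid_register()/ccid_unregister() come from this patch.

/*
 * Hypothetical CCID module registering with the reworked interface.
 * "ccidex", the ID value and the tx state are invented for illustration;
 * only struct ccid_operations, ccid_priv() and ccid_register() /
 * ccid_unregister() are defined by this patch.
 */
#include <linux/module.h>
#include "../ccid.h"

struct ccidex_hc_tx_sock {
	u32 tx_cwnd;			/* per-half-connection sender state */
};

static int ccidex_hc_tx_init(struct ccid *ccid, struct sock *sk)
{
	struct ccidex_hc_tx_sock *hctx = ccid_priv(ccid);

	hctx->tx_cwnd = 1;		/* area was already zeroed by ccid_new() */
	return 0;
}

static struct ccid_operations ccidex_ops = {
	.ccid_id		= 250,	/* made-up identifier */
	.ccid_name		= "ccidex",
	.ccid_owner		= THIS_MODULE,
	.ccid_hc_tx_obj_size	= sizeof(struct ccidex_hc_tx_sock),
	.ccid_hc_tx_init	= ccidex_hc_tx_init,
};

static int __init ccidex_module_init(void)
{
	/* creates the "ccidex_hc_rx_sock" and "ccidex_hc_tx_sock" slabs */
	return ccid_register(&ccidex_ops);
}
module_init(ccidex_module_init);

static void __exit ccidex_module_exit(void)
{
	ccid_unregister(&ccidex_ops);
}
module_exit(ccidex_module_exit);

MODULE_LICENSE("GPL");

ccid_new() allocates from the matching slab, zeroes the private area and only then calls the init hook, which is why the sketch does not clear its own state.
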
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index de681c6ad081..f7eb6c613414 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -23,14 +23,16 @@
23 23
24struct tcp_info; 24struct tcp_info;
25 25
26struct ccid { 26struct ccid_operations {
27 unsigned char ccid_id; 27 unsigned char ccid_id;
28 const char *ccid_name; 28 const char *ccid_name;
29 struct module *ccid_owner; 29 struct module *ccid_owner;
30 int (*ccid_init)(struct sock *sk); 30 kmem_cache_t *ccid_hc_rx_slab;
31 void (*ccid_exit)(struct sock *sk); 31 __u32 ccid_hc_rx_obj_size;
32 int (*ccid_hc_rx_init)(struct sock *sk); 32 kmem_cache_t *ccid_hc_tx_slab;
33 int (*ccid_hc_tx_init)(struct sock *sk); 33 __u32 ccid_hc_tx_obj_size;
34 int (*ccid_hc_rx_init)(struct ccid *ccid, struct sock *sk);
35 int (*ccid_hc_tx_init)(struct ccid *ccid, struct sock *sk);
34 void (*ccid_hc_rx_exit)(struct sock *sk); 36 void (*ccid_hc_rx_exit)(struct sock *sk);
35 void (*ccid_hc_tx_exit)(struct sock *sk); 37 void (*ccid_hc_tx_exit)(struct sock *sk);
36 void (*ccid_hc_rx_packet_recv)(struct sock *sk, 38 void (*ccid_hc_rx_packet_recv)(struct sock *sk,
@@ -39,9 +41,9 @@ struct ccid {
39 unsigned char option, 41 unsigned char option,
40 unsigned char len, u16 idx, 42 unsigned char len, u16 idx,
41 unsigned char* value); 43 unsigned char* value);
42 void (*ccid_hc_rx_insert_options)(struct sock *sk, 44 int (*ccid_hc_rx_insert_options)(struct sock *sk,
43 struct sk_buff *skb); 45 struct sk_buff *skb);
44 void (*ccid_hc_tx_insert_options)(struct sock *sk, 46 int (*ccid_hc_tx_insert_options)(struct sock *sk,
45 struct sk_buff *skb); 47 struct sk_buff *skb);
46 void (*ccid_hc_tx_packet_recv)(struct sock *sk, 48 void (*ccid_hc_tx_packet_recv)(struct sock *sk,
47 struct sk_buff *skb); 49 struct sk_buff *skb);
@@ -67,75 +69,58 @@ struct ccid {
67 int __user *optlen); 69 int __user *optlen);
68}; 70};
69 71
70extern int ccid_register(struct ccid *ccid); 72extern int ccid_register(struct ccid_operations *ccid_ops);
71extern int ccid_unregister(struct ccid *ccid); 73extern int ccid_unregister(struct ccid_operations *ccid_ops);
72 74
73extern struct ccid *ccid_init(unsigned char id, struct sock *sk); 75struct ccid {
74extern void ccid_exit(struct ccid *ccid, struct sock *sk); 76 struct ccid_operations *ccid_ops;
77 char ccid_priv[0];
78};
75 79
76static inline void __ccid_get(struct ccid *ccid) 80static inline void *ccid_priv(const struct ccid *ccid)
77{ 81{
78 __module_get(ccid->ccid_owner); 82 return (void *)ccid->ccid_priv;
79} 83}
80 84
85extern struct ccid *ccid_new(unsigned char id, struct sock *sk, int rx,
86 gfp_t gfp);
87
88extern struct ccid *ccid_hc_rx_new(unsigned char id, struct sock *sk,
89 gfp_t gfp);
90extern struct ccid *ccid_hc_tx_new(unsigned char id, struct sock *sk,
91 gfp_t gfp);
92
93extern void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk);
94extern void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk);
95
81static inline int ccid_hc_tx_send_packet(struct ccid *ccid, struct sock *sk, 96static inline int ccid_hc_tx_send_packet(struct ccid *ccid, struct sock *sk,
82 struct sk_buff *skb, int len) 97 struct sk_buff *skb, int len)
83{ 98{
84 int rc = 0; 99 int rc = 0;
85 if (ccid->ccid_hc_tx_send_packet != NULL) 100 if (ccid->ccid_ops->ccid_hc_tx_send_packet != NULL)
86 rc = ccid->ccid_hc_tx_send_packet(sk, skb, len); 101 rc = ccid->ccid_ops->ccid_hc_tx_send_packet(sk, skb, len);
87 return rc; 102 return rc;
88} 103}
89 104
90static inline void ccid_hc_tx_packet_sent(struct ccid *ccid, struct sock *sk, 105static inline void ccid_hc_tx_packet_sent(struct ccid *ccid, struct sock *sk,
91 int more, int len) 106 int more, int len)
92{ 107{
93 if (ccid->ccid_hc_tx_packet_sent != NULL) 108 if (ccid->ccid_ops->ccid_hc_tx_packet_sent != NULL)
94 ccid->ccid_hc_tx_packet_sent(sk, more, len); 109 ccid->ccid_ops->ccid_hc_tx_packet_sent(sk, more, len);
95}
96
97static inline int ccid_hc_rx_init(struct ccid *ccid, struct sock *sk)
98{
99 int rc = 0;
100 if (ccid->ccid_hc_rx_init != NULL)
101 rc = ccid->ccid_hc_rx_init(sk);
102 return rc;
103}
104
105static inline int ccid_hc_tx_init(struct ccid *ccid, struct sock *sk)
106{
107 int rc = 0;
108 if (ccid->ccid_hc_tx_init != NULL)
109 rc = ccid->ccid_hc_tx_init(sk);
110 return rc;
111}
112
113static inline void ccid_hc_rx_exit(struct ccid *ccid, struct sock *sk)
114{
115 if (ccid != NULL && ccid->ccid_hc_rx_exit != NULL &&
116 dccp_sk(sk)->dccps_hc_rx_ccid_private != NULL)
117 ccid->ccid_hc_rx_exit(sk);
118}
119
120static inline void ccid_hc_tx_exit(struct ccid *ccid, struct sock *sk)
121{
122 if (ccid != NULL && ccid->ccid_hc_tx_exit != NULL &&
123 dccp_sk(sk)->dccps_hc_tx_ccid_private != NULL)
124 ccid->ccid_hc_tx_exit(sk);
125} 110}
126 111
127static inline void ccid_hc_rx_packet_recv(struct ccid *ccid, struct sock *sk, 112static inline void ccid_hc_rx_packet_recv(struct ccid *ccid, struct sock *sk,
128 struct sk_buff *skb) 113 struct sk_buff *skb)
129{ 114{
130 if (ccid->ccid_hc_rx_packet_recv != NULL) 115 if (ccid->ccid_ops->ccid_hc_rx_packet_recv != NULL)
131 ccid->ccid_hc_rx_packet_recv(sk, skb); 116 ccid->ccid_ops->ccid_hc_rx_packet_recv(sk, skb);
132} 117}
133 118
134static inline void ccid_hc_tx_packet_recv(struct ccid *ccid, struct sock *sk, 119static inline void ccid_hc_tx_packet_recv(struct ccid *ccid, struct sock *sk,
135 struct sk_buff *skb) 120 struct sk_buff *skb)
136{ 121{
137 if (ccid->ccid_hc_tx_packet_recv != NULL) 122 if (ccid->ccid_ops->ccid_hc_tx_packet_recv != NULL)
138 ccid->ccid_hc_tx_packet_recv(sk, skb); 123 ccid->ccid_ops->ccid_hc_tx_packet_recv(sk, skb);
139} 124}
140 125
141static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk, 126static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
@@ -144,8 +129,8 @@ static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
144 unsigned char* value) 129 unsigned char* value)
145{ 130{
146 int rc = 0; 131 int rc = 0;
147 if (ccid->ccid_hc_tx_parse_options != NULL) 132 if (ccid->ccid_ops->ccid_hc_tx_parse_options != NULL)
148 rc = ccid->ccid_hc_tx_parse_options(sk, option, len, idx, 133 rc = ccid->ccid_ops->ccid_hc_tx_parse_options(sk, option, len, idx,
149 value); 134 value);
150 return rc; 135 return rc;
151} 136}
@@ -156,37 +141,39 @@ static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk,
156 unsigned char* value) 141 unsigned char* value)
157{ 142{
158 int rc = 0; 143 int rc = 0;
159 if (ccid->ccid_hc_rx_parse_options != NULL) 144 if (ccid->ccid_ops->ccid_hc_rx_parse_options != NULL)
160 rc = ccid->ccid_hc_rx_parse_options(sk, option, len, idx, value); 145 rc = ccid->ccid_ops->ccid_hc_rx_parse_options(sk, option, len, idx, value);
161 return rc; 146 return rc;
162} 147}
163 148
164static inline void ccid_hc_tx_insert_options(struct ccid *ccid, struct sock *sk, 149static inline int ccid_hc_tx_insert_options(struct ccid *ccid, struct sock *sk,
165 struct sk_buff *skb) 150 struct sk_buff *skb)
166{ 151{
167 if (ccid->ccid_hc_tx_insert_options != NULL) 152 if (ccid->ccid_ops->ccid_hc_tx_insert_options != NULL)
168 ccid->ccid_hc_tx_insert_options(sk, skb); 153 return ccid->ccid_ops->ccid_hc_tx_insert_options(sk, skb);
154 return 0;
169} 155}
170 156
171static inline void ccid_hc_rx_insert_options(struct ccid *ccid, struct sock *sk, 157static inline int ccid_hc_rx_insert_options(struct ccid *ccid, struct sock *sk,
172 struct sk_buff *skb) 158 struct sk_buff *skb)
173{ 159{
174 if (ccid->ccid_hc_rx_insert_options != NULL) 160 if (ccid->ccid_ops->ccid_hc_rx_insert_options != NULL)
175 ccid->ccid_hc_rx_insert_options(sk, skb); 161 return ccid->ccid_ops->ccid_hc_rx_insert_options(sk, skb);
162 return 0;
176} 163}
177 164
178static inline void ccid_hc_rx_get_info(struct ccid *ccid, struct sock *sk, 165static inline void ccid_hc_rx_get_info(struct ccid *ccid, struct sock *sk,
179 struct tcp_info *info) 166 struct tcp_info *info)
180{ 167{
181 if (ccid->ccid_hc_rx_get_info != NULL) 168 if (ccid->ccid_ops->ccid_hc_rx_get_info != NULL)
182 ccid->ccid_hc_rx_get_info(sk, info); 169 ccid->ccid_ops->ccid_hc_rx_get_info(sk, info);
183} 170}
184 171
185static inline void ccid_hc_tx_get_info(struct ccid *ccid, struct sock *sk, 172static inline void ccid_hc_tx_get_info(struct ccid *ccid, struct sock *sk,
186 struct tcp_info *info) 173 struct tcp_info *info)
187{ 174{
188 if (ccid->ccid_hc_tx_get_info != NULL) 175 if (ccid->ccid_ops->ccid_hc_tx_get_info != NULL)
189 ccid->ccid_hc_tx_get_info(sk, info); 176 ccid->ccid_ops->ccid_hc_tx_get_info(sk, info);
190} 177}
191 178
192static inline int ccid_hc_rx_getsockopt(struct ccid *ccid, struct sock *sk, 179static inline int ccid_hc_rx_getsockopt(struct ccid *ccid, struct sock *sk,
@@ -194,8 +181,8 @@ static inline int ccid_hc_rx_getsockopt(struct ccid *ccid, struct sock *sk,
194 u32 __user *optval, int __user *optlen) 181 u32 __user *optval, int __user *optlen)
195{ 182{
196 int rc = -ENOPROTOOPT; 183 int rc = -ENOPROTOOPT;
197 if (ccid->ccid_hc_rx_getsockopt != NULL) 184 if (ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL)
198 rc = ccid->ccid_hc_rx_getsockopt(sk, optname, len, 185 rc = ccid->ccid_ops->ccid_hc_rx_getsockopt(sk, optname, len,
199 optval, optlen); 186 optval, optlen);
200 return rc; 187 return rc;
201} 188}
@@ -205,8 +192,8 @@ static inline int ccid_hc_tx_getsockopt(struct ccid *ccid, struct sock *sk,
205 u32 __user *optval, int __user *optlen) 192 u32 __user *optval, int __user *optlen)
206{ 193{
207 int rc = -ENOPROTOOPT; 194 int rc = -ENOPROTOOPT;
208 if (ccid->ccid_hc_tx_getsockopt != NULL) 195 if (ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL)
209 rc = ccid->ccid_hc_tx_getsockopt(sk, optname, len, 196 rc = ccid->ccid_ops->ccid_hc_tx_getsockopt(sk, optname, len,
210 optval, optlen); 197 optval, optlen);
211 return rc; 198 return rc;
212} 199}
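
As the ccid.h hunks above show, struct ccid is now just the operations pointer followed by a zero-length ccid_priv[] array: ccid_new() allocates sizeof(struct ccid) plus the CCID's object size as one slab object, zeroes the tail, and hands it out through ccid_priv() (the same address as ccid + 1). A small userspace illustration of that "header plus trailing private data" layout follows; header, priv_of and tx_state are made-up names, not kernel API.

/*
 * Userspace illustration of the struct ccid layout: a small fixed header
 * immediately followed by the CCID's private storage, carved out of one
 * allocation.  header, priv_of and tx_state are made-up names.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct header {
	const char *name;		/* stands in for the ccid_ops pointer */
	char priv[];			/* private area starts right here */
};

static void *priv_of(struct header *h)
{
	return h->priv;			/* same address as (h + 1) */
}

struct tx_state {			/* stands in for a ccid*_hc_tx_sock */
	int cwnd;
};

int main(void)
{
	/* one allocation holds both the header and the private object */
	struct header *h = malloc(sizeof(*h) + sizeof(struct tx_state));
	struct tx_state *tx;

	if (h == NULL)
		return 1;
	h->name = "example";
	memset(priv_of(h), 0, sizeof(struct tx_state));	/* as ccid_new() does */

	tx = priv_of(h);
	tx->cwnd = 1;
	printf("%s: cwnd=%d\n", h->name, tx->cwnd);
	free(h);
	return 0;
}

One slab object per half-connection holds both the header and the CCID's state, which is exactly what the rx/tx caches created in ccid_register() are sized for.
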
diff --git a/net/dccp/ccids/Kconfig b/net/dccp/ccids/Kconfig
index 7684d83946a4..ca00191628f7 100644
--- a/net/dccp/ccids/Kconfig
+++ b/net/dccp/ccids/Kconfig
@@ -1,9 +1,39 @@
1menu "DCCP CCIDs Configuration (EXPERIMENTAL)" 1menu "DCCP CCIDs Configuration (EXPERIMENTAL)"
2 depends on IP_DCCP && EXPERIMENTAL 2 depends on IP_DCCP && EXPERIMENTAL
3 3
4config IP_DCCP_CCID2
5 tristate "CCID2 (TCP-Like) (EXPERIMENTAL)"
6 depends on IP_DCCP
7 def_tristate IP_DCCP
8 select IP_DCCP_ACKVEC
9 ---help---
10 CCID 2, TCP-like Congestion Control, denotes Additive Increase,
11 Multiplicative Decrease (AIMD) congestion control with behavior
12 modelled directly on TCP, including congestion window, slow start,
13 timeouts, and so forth [RFC 2581]. CCID 2 achieves maximum
14 bandwidth over the long term, consistent with the use of end-to-end
15 congestion control, but halves its congestion window in response to
16 each congestion event. This leads to the abrupt rate changes
17 typical of TCP. Applications should use CCID 2 if they prefer
18 maximum bandwidth utilization to steadiness of rate. This is often
19 the case for applications that are not playing their data directly
20 to the user. For example, a hypothetical application that
21 transferred files over DCCP, using application-level retransmissions
22 for lost packets, would prefer CCID 2 to CCID 3. On-line games may
23 also prefer CCID 2.
24
25 CCID 2 is further described in:
26 http://www.icir.org/kohler/dccp/draft-ietf-dccp-ccid2-10.txt
27
28 This text was extracted from:
29 http://www.icir.org/kohler/dccp/draft-ietf-dccp-spec-13.txt
30
31 If in doubt, say M.
32
4config IP_DCCP_CCID3 33config IP_DCCP_CCID3
5 tristate "CCID3 (TFRC) (EXPERIMENTAL)" 34 tristate "CCID3 (TCP-Friendly) (EXPERIMENTAL)"
6 depends on IP_DCCP 35 depends on IP_DCCP
36 def_tristate IP_DCCP
7 ---help--- 37 ---help---
8 CCID 3 denotes TCP-Friendly Rate Control (TFRC), an equation-based 38 CCID 3 denotes TCP-Friendly Rate Control (TFRC), an equation-based
9 rate-controlled congestion control mechanism. TFRC is designed to 39 rate-controlled congestion control mechanism. TFRC is designed to
@@ -15,10 +45,15 @@ config IP_DCCP_CCID3
15 suitable than CCID 2 for applications such as streaming media where a 45 suitable than CCID 2 for applications such as streaming media where a
16 relatively smooth sending rate is of importance. 46 relatively smooth sending rate is of importance.
17 47
18 CCID 3 is further described in [CCID 3 PROFILE]. The TFRC 48 CCID 3 is further described in:
19 congestion control algorithms were initially described in RFC 3448. 49
50 http://www.icir.org/kohler/dccp/draft-ietf-dccp-ccid3-11.txt.
51
52 The TFRC congestion control algorithms were initially described in
53 RFC 3448.
20 54
21 This text was extracted from draft-ietf-dccp-spec-11.txt. 55 This text was extracted from:
56 http://www.icir.org/kohler/dccp/draft-ietf-dccp-spec-13.txt
22 57
23 If in doubt, say M. 58 If in doubt, say M.
24 59
diff --git a/net/dccp/ccids/Makefile b/net/dccp/ccids/Makefile
index 956f79f50743..438f20bccff7 100644
--- a/net/dccp/ccids/Makefile
+++ b/net/dccp/ccids/Makefile
@@ -2,4 +2,8 @@ obj-$(CONFIG_IP_DCCP_CCID3) += dccp_ccid3.o
2 2
3dccp_ccid3-y := ccid3.o 3dccp_ccid3-y := ccid3.o
4 4
5obj-$(CONFIG_IP_DCCP_CCID2) += dccp_ccid2.o
6
7dccp_ccid2-y := ccid2.o
8
5obj-y += lib/ 9obj-y += lib/
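
The new ccid2.c added below maintains a per-socket round-trip estimate in ccid2_new_ack() with the familiar integer-shift estimator: rttvar = 3/4 * rttvar + |srtt - r| / 4, srtt = 7/8 * srtt + r / 8, rto = srtt + 4 * rttvar, with the result clamped (in this patch) to between one and sixty seconds. The short userspace model below reproduces just that arithmetic; HZ and the sample values are illustrative, and jiffies are replaced by plain inputs.

/*
 * Rough userspace model of the RTO arithmetic in ccid2_new_ack() below.
 * All values are in "ticks"; HZ here is an illustrative ticks-per-second.
 */
#include <stdio.h>
#include <stdlib.h>

#define HZ 250				/* illustrative clock rate */

static long srtt = -1, rttvar, rto;

static void rtt_sample(long r)		/* r: measured round-trip in ticks */
{
	long s;

	if (srtt == -1) {		/* first measurement */
		srtt = r;
		rttvar = r >> 1;
	} else {
		rttvar = ((3 * rttvar) >> 2) + (labs(srtt - r) >> 2);
		srtt = ((7 * srtt) >> 3) + (r >> 3);
	}

	s = rttvar << 2;
	if (s == 0)			/* clock-granularity floor */
		s = 1;
	rto = srtt + s;
	if (rto < HZ)			/* at least one second (the #if 1 block) */
		rto = HZ;
	if (rto > 60 * HZ)		/* at most sixty seconds */
		rto = 60 * HZ;
}

int main(void)
{
	const long samples[] = { 50, 60, 45, 200 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		rtt_sample(samples[i]);
		printf("r=%ld srtt=%ld rttvar=%ld rto=%ld\n",
		       samples[i], srtt, rttvar, rto);
	}
	return 0;
}

In the patch the sample r is jiffies minus the send time recorded in the ccid2_seq ring, and a new sample is only taken once at least one srtt has elapsed since the previous update.
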
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
new file mode 100644
index 000000000000..d4f9e2d33453
--- /dev/null
+++ b/net/dccp/ccids/ccid2.c
@@ -0,0 +1,779 @@
1/*
2 * net/dccp/ccids/ccid2.c
3 *
4 * Copyright (c) 2005, 2006 Andrea Bittau <a.bittau@cs.ucl.ac.uk>
5 *
6 * Changes to meet Linux coding standards, and DCCP infrastructure fixes.
7 *
8 * Copyright (c) 2006 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25/*
26 * This implementation should follow: draft-ietf-dccp-ccid2-10.txt
27 *
28 * BUGS:
29 * - sequence number wrapping
30 * - jiffies wrapping
31 */
32
33#include <linux/config.h>
34#include "../ccid.h"
35#include "../dccp.h"
36#include "ccid2.h"
37
38static int ccid2_debug;
39
40#undef CCID2_DEBUG
41#ifdef CCID2_DEBUG
42#define ccid2_pr_debug(format, a...) \
43 do { if (ccid2_debug) \
44 printk(KERN_DEBUG "%s: " format, __FUNCTION__, ##a); \
45 } while (0)
46#else
47#define ccid2_pr_debug(format, a...)
48#endif
49
50static const int ccid2_seq_len = 128;
51
52#ifdef CCID2_DEBUG
53static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hctx)
54{
55 int len = 0;
56 int pipe = 0;
57 struct ccid2_seq *seqp = hctx->ccid2hctx_seqh;
58
59 /* there is data in the chain */
60 if (seqp != hctx->ccid2hctx_seqt) {
61 seqp = seqp->ccid2s_prev;
62 len++;
63 if (!seqp->ccid2s_acked)
64 pipe++;
65
66 while (seqp != hctx->ccid2hctx_seqt) {
67 struct ccid2_seq *prev = seqp->ccid2s_prev;
68
69 len++;
70 if (!prev->ccid2s_acked)
71 pipe++;
72
73 /* packets are sent sequentially */
74 BUG_ON(seqp->ccid2s_seq <= prev->ccid2s_seq);
75 BUG_ON(seqp->ccid2s_sent < prev->ccid2s_sent);
76 BUG_ON(len > ccid2_seq_len);
77
78 seqp = prev;
79 }
80 }
81
82 BUG_ON(pipe != hctx->ccid2hctx_pipe);
83 ccid2_pr_debug("len of chain=%d\n", len);
84
85 do {
86 seqp = seqp->ccid2s_prev;
87 len++;
88 BUG_ON(len > ccid2_seq_len);
89 } while (seqp != hctx->ccid2hctx_seqh);
90
91 BUG_ON(len != ccid2_seq_len);
92 ccid2_pr_debug("total len=%d\n", len);
93}
94#else
95#define ccid2_hc_tx_check_sanity(hctx) do {} while (0)
96#endif
97
98static int ccid2_hc_tx_send_packet(struct sock *sk,
99 struct sk_buff *skb, int len)
100{
101 struct ccid2_hc_tx_sock *hctx;
102
103 switch (DCCP_SKB_CB(skb)->dccpd_type) {
104 case 0: /* XXX data packets from userland come through like this */
105 case DCCP_PKT_DATA:
106 case DCCP_PKT_DATAACK:
107 break;
108 /* No congestion control on other packets */
109 default:
110 return 0;
111 }
112
113 hctx = ccid2_hc_tx_sk(sk);
114
115 ccid2_pr_debug("pipe=%d cwnd=%d\n", hctx->ccid2hctx_pipe,
116 hctx->ccid2hctx_cwnd);
117
118 if (hctx->ccid2hctx_pipe < hctx->ccid2hctx_cwnd) {
119 /* OK we can send... make sure previous packet was sent off */
120 if (!hctx->ccid2hctx_sendwait) {
121 hctx->ccid2hctx_sendwait = 1;
122 return 0;
123 }
124 }
125
126 return 100; /* XXX */
127}
128
129static void ccid2_change_l_ack_ratio(struct sock *sk, int val)
130{
131 struct dccp_sock *dp = dccp_sk(sk);
132 /*
133 * XXX I don't really agree with val != 2. If cwnd is 1, ack ratio
134 * should be 1... it shouldn't be allowed to become 2.
135 * -sorbo.
136 */
137 if (val != 2) {
138 const struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
139 int max = hctx->ccid2hctx_cwnd / 2;
140
141 /* round up */
142 if (hctx->ccid2hctx_cwnd & 1)
143 max++;
144
145 if (val > max)
146 val = max;
147 }
148
149 ccid2_pr_debug("changing local ack ratio to %d\n", val);
150 WARN_ON(val <= 0);
151 dp->dccps_l_ack_ratio = val;
152}
153
154static void ccid2_change_cwnd(struct sock *sk, int val)
155{
156 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
157
158 if (val == 0)
159 val = 1;
160
161 /* XXX do we need to change ack ratio? */
162 ccid2_pr_debug("change cwnd to %d\n", val);
163
164 BUG_ON(val < 1);
165 hctx->ccid2hctx_cwnd = val;
166}
167
168static void ccid2_start_rto_timer(struct sock *sk);
169
170static void ccid2_hc_tx_rto_expire(unsigned long data)
171{
172 struct sock *sk = (struct sock *)data;
173 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
174 long s;
175
176 bh_lock_sock(sk);
177 if (sock_owned_by_user(sk)) {
178 sk_reset_timer(sk, &hctx->ccid2hctx_rtotimer,
179 jiffies + HZ / 5);
180 goto out;
181 }
182
183 ccid2_pr_debug("RTO_EXPIRE\n");
184
185 ccid2_hc_tx_check_sanity(hctx);
186
187 /* back-off timer */
188 hctx->ccid2hctx_rto <<= 1;
189
190 s = hctx->ccid2hctx_rto / HZ;
191 if (s > 60)
192 hctx->ccid2hctx_rto = 60 * HZ;
193
194 ccid2_start_rto_timer(sk);
195
196 /* adjust pipe, cwnd etc */
197 hctx->ccid2hctx_pipe = 0;
198 hctx->ccid2hctx_ssthresh = hctx->ccid2hctx_cwnd >> 1;
199 if (hctx->ccid2hctx_ssthresh < 2)
200 hctx->ccid2hctx_ssthresh = 2;
201 ccid2_change_cwnd(sk, 1);
202
203 /* clear state about stuff we sent */
204 hctx->ccid2hctx_seqt = hctx->ccid2hctx_seqh;
205 hctx->ccid2hctx_ssacks = 0;
206 hctx->ccid2hctx_acks = 0;
207 hctx->ccid2hctx_sent = 0;
208
209 /* clear ack ratio state. */
210 hctx->ccid2hctx_arsent = 0;
211 hctx->ccid2hctx_ackloss = 0;
212 hctx->ccid2hctx_rpseq = 0;
213 hctx->ccid2hctx_rpdupack = -1;
214 ccid2_change_l_ack_ratio(sk, 1);
215 ccid2_hc_tx_check_sanity(hctx);
216out:
217 bh_unlock_sock(sk);
218 sock_put(sk);
219}
220
221static void ccid2_start_rto_timer(struct sock *sk)
222{
223 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
224
225 ccid2_pr_debug("setting RTO timeout=%ld\n", hctx->ccid2hctx_rto);
226
227 BUG_ON(timer_pending(&hctx->ccid2hctx_rtotimer));
228 sk_reset_timer(sk, &hctx->ccid2hctx_rtotimer,
229 jiffies + hctx->ccid2hctx_rto);
230}
231
232static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, int len)
233{
234 struct dccp_sock *dp = dccp_sk(sk);
235 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
236 u64 seq;
237
238 ccid2_hc_tx_check_sanity(hctx);
239
240 BUG_ON(!hctx->ccid2hctx_sendwait);
241 hctx->ccid2hctx_sendwait = 0;
242 hctx->ccid2hctx_pipe++;
243 BUG_ON(hctx->ccid2hctx_pipe < 0);
244
245 /* There is an issue. What if another packet is sent between
246 * packet_send() and packet_sent(). Then the sequence number would be
247 * wrong.
248 * -sorbo.
249 */
250 seq = dp->dccps_gss;
251
252 hctx->ccid2hctx_seqh->ccid2s_seq = seq;
253 hctx->ccid2hctx_seqh->ccid2s_acked = 0;
254 hctx->ccid2hctx_seqh->ccid2s_sent = jiffies;
255 hctx->ccid2hctx_seqh = hctx->ccid2hctx_seqh->ccid2s_next;
256
257 ccid2_pr_debug("cwnd=%d pipe=%d\n", hctx->ccid2hctx_cwnd,
258 hctx->ccid2hctx_pipe);
259
260 if (hctx->ccid2hctx_seqh == hctx->ccid2hctx_seqt) {
261 /* XXX allocate more space */
262 WARN_ON(1);
263 }
264
265 hctx->ccid2hctx_sent++;
266
267 /* Ack Ratio. Need to maintain a concept of how many windows we sent */
268 hctx->ccid2hctx_arsent++;
269 /* We had an ack loss in this window... */
270 if (hctx->ccid2hctx_ackloss) {
271 if (hctx->ccid2hctx_arsent >= hctx->ccid2hctx_cwnd) {
272 hctx->ccid2hctx_arsent = 0;
273 hctx->ccid2hctx_ackloss = 0;
274 }
275 } else {
276 /* No acks lost up to now... */
277 /* decrease ack ratio if enough packets were sent */
278 if (dp->dccps_l_ack_ratio > 1) {
279 /* XXX don't calculate denominator each time */
280 int denom = dp->dccps_l_ack_ratio * dp->dccps_l_ack_ratio -
281 dp->dccps_l_ack_ratio;
282
283 denom = hctx->ccid2hctx_cwnd * hctx->ccid2hctx_cwnd / denom;
284
285 if (hctx->ccid2hctx_arsent >= denom) {
286 ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio - 1);
287 hctx->ccid2hctx_arsent = 0;
288 }
289 } else {
290 /* we can't increase ack ratio further [1] */
291 hctx->ccid2hctx_arsent = 0; /* or maybe set it to cwnd*/
292 }
293 }
294
295 /* setup RTO timer */
296 if (!timer_pending(&hctx->ccid2hctx_rtotimer))
297 ccid2_start_rto_timer(sk);
298
299#ifdef CCID2_DEBUG
300 ccid2_pr_debug("pipe=%d\n", hctx->ccid2hctx_pipe);
301 ccid2_pr_debug("Sent: seq=%llu\n", seq);
302 do {
303 struct ccid2_seq *seqp = hctx->ccid2hctx_seqt;
304
305 while (seqp != hctx->ccid2hctx_seqh) {
306 ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n",
307 seqp->ccid2s_seq, seqp->ccid2s_acked,
308 seqp->ccid2s_sent);
309 seqp = seqp->ccid2s_next;
310 }
311 } while (0);
312 ccid2_pr_debug("=========\n");
313 ccid2_hc_tx_check_sanity(hctx);
314#endif
315}
316
317/* XXX Lame code duplication!
318 * returns -1 if none was found.
319 * else returns the next offset to use in the function call.
320 */
321static int ccid2_ackvector(struct sock *sk, struct sk_buff *skb, int offset,
322 unsigned char **vec, unsigned char *veclen)
323{
324 const struct dccp_hdr *dh = dccp_hdr(skb);
325 unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb);
326 unsigned char *opt_ptr;
327 const unsigned char *opt_end = (unsigned char *)dh +
328 (dh->dccph_doff * 4);
329 unsigned char opt, len;
330 unsigned char *value;
331
332 BUG_ON(offset < 0);
333 options += offset;
334 opt_ptr = options;
335 if (opt_ptr >= opt_end)
336 return -1;
337
338 while (opt_ptr != opt_end) {
339 opt = *opt_ptr++;
340 len = 0;
341 value = NULL;
342
343 /* Check if this isn't a single byte option */
344 if (opt > DCCPO_MAX_RESERVED) {
345 if (opt_ptr == opt_end)
346 goto out_invalid_option;
347
348 len = *opt_ptr++;
349 if (len < 3)
350 goto out_invalid_option;
351 /*
352 * Remove the type and len fields, leaving
353 * just the value size
354 */
355 len -= 2;
356 value = opt_ptr;
357 opt_ptr += len;
358
359 if (opt_ptr > opt_end)
360 goto out_invalid_option;
361 }
362
363 switch (opt) {
364 case DCCPO_ACK_VECTOR_0:
365 case DCCPO_ACK_VECTOR_1:
366 *vec = value;
367 *veclen = len;
368 return offset + (opt_ptr - options);
369 }
370 }
371
372 return -1;
373
374out_invalid_option:
375 BUG_ON(1); /* should never happen... options were previously parsed ! */
376 return -1;
377}
378
379static void ccid2_hc_tx_kill_rto_timer(struct sock *sk)
380{
381 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
382
383 sk_stop_timer(sk, &hctx->ccid2hctx_rtotimer);
384 ccid2_pr_debug("deleted RTO timer\n");
385}
386
387static inline void ccid2_new_ack(struct sock *sk,
388 struct ccid2_seq *seqp,
389 unsigned int *maxincr)
390{
391 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
392
393 /* slow start */
394 if (hctx->ccid2hctx_cwnd < hctx->ccid2hctx_ssthresh) {
395 hctx->ccid2hctx_acks = 0;
396
397 /* We can increase cwnd at most maxincr [ack_ratio/2] */
398 if (*maxincr) {
399 /* increase every 2 acks */
400 hctx->ccid2hctx_ssacks++;
401 if (hctx->ccid2hctx_ssacks == 2) {
402 ccid2_change_cwnd(sk, hctx->ccid2hctx_cwnd + 1);
403 hctx->ccid2hctx_ssacks = 0;
404 *maxincr = *maxincr - 1;
405 }
406 } else {
407 /* increased cwnd enough for this single ack */
408 hctx->ccid2hctx_ssacks = 0;
409 }
410 } else {
411 hctx->ccid2hctx_ssacks = 0;
412 hctx->ccid2hctx_acks++;
413
414 if (hctx->ccid2hctx_acks >= hctx->ccid2hctx_cwnd) {
415 ccid2_change_cwnd(sk, hctx->ccid2hctx_cwnd + 1);
416 hctx->ccid2hctx_acks = 0;
417 }
418 }
419
420 /* update RTO */
421 if (hctx->ccid2hctx_srtt == -1 ||
422 (jiffies - hctx->ccid2hctx_lastrtt) >= hctx->ccid2hctx_srtt) {
423 unsigned long r = jiffies - seqp->ccid2s_sent;
424 int s;
425
426 /* first measurement */
427 if (hctx->ccid2hctx_srtt == -1) {
428 ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n",
429 r, jiffies, seqp->ccid2s_seq);
430 hctx->ccid2hctx_srtt = r;
431 hctx->ccid2hctx_rttvar = r >> 1;
432 } else {
433 /* RTTVAR */
434 long tmp = hctx->ccid2hctx_srtt - r;
435 if (tmp < 0)
436 tmp *= -1;
437
438 tmp >>= 2;
439 hctx->ccid2hctx_rttvar *= 3;
440 hctx->ccid2hctx_rttvar >>= 2;
441 hctx->ccid2hctx_rttvar += tmp;
442
443 /* SRTT */
444 hctx->ccid2hctx_srtt *= 7;
445 hctx->ccid2hctx_srtt >>= 3;
446 tmp = r >> 3;
447 hctx->ccid2hctx_srtt += tmp;
448 }
449 s = hctx->ccid2hctx_rttvar << 2;
450 /* clock granularity is 1 when based on jiffies */
451 if (!s)
452 s = 1;
453 hctx->ccid2hctx_rto = hctx->ccid2hctx_srtt + s;
454
455 /* must be at least a second */
456 s = hctx->ccid2hctx_rto / HZ;
457 /* DCCP doesn't require this [but I like it cuz my code sux] */
458#if 1
459 if (s < 1)
460 hctx->ccid2hctx_rto = HZ;
461#endif
462 /* max 60 seconds */
463 if (s > 60)
464 hctx->ccid2hctx_rto = HZ * 60;
465
466 hctx->ccid2hctx_lastrtt = jiffies;
467
468 ccid2_pr_debug("srtt: %ld rttvar: %ld rto: %ld (HZ=%d) R=%lu\n",
469 hctx->ccid2hctx_srtt, hctx->ccid2hctx_rttvar,
470 hctx->ccid2hctx_rto, HZ, r);
471 hctx->ccid2hctx_sent = 0;
472 }
473
474 /* we got a new ack, so re-start RTO timer */
475 ccid2_hc_tx_kill_rto_timer(sk);
476 ccid2_start_rto_timer(sk);
477}
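
The RTT block above is the familiar Jacobson/Karels estimator done with integer shifts: RTTVAR keeps three quarters of its old value plus a quarter of the new error, SRTT keeps seven eighths of its old value plus an eighth of the new sample, and RTO becomes SRTT plus four times RTTVAR, clamped between one second and sixty seconds. Below is a rough userspace sketch of the same idea (HZ_EX and all other names are made up for the example; this is not the kernel code):

#include <stdio.h>

#define HZ_EX 100			/* stand-in for the kernel's HZ */

struct rtt_state {
	long srtt;			/* -1 until the first sample */
	long rttvar;
	long rto;
};

static void rtt_sample(struct rtt_state *st, long r)
{
	long var4;

	if (st->srtt == -1) {		/* first measurement */
		st->srtt = r;
		st->rttvar = r >> 1;
	} else {
		long err = st->srtt - r;

		if (err < 0)
			err = -err;
		st->rttvar = st->rttvar - (st->rttvar >> 2) + (err >> 2);
		st->srtt = st->srtt - (st->srtt >> 3) + (r >> 3);
	}
	var4 = st->rttvar << 2;
	if (var4 == 0)
		var4 = 1;		/* clock granularity of one tick */
	st->rto = st->srtt + var4;
	if (st->rto < HZ_EX)
		st->rto = HZ_EX;	/* lower bound: one second */
	if (st->rto > 60 * HZ_EX)
		st->rto = 60 * HZ_EX;	/* upper bound: sixty seconds */
}

int main(void)
{
	struct rtt_state st = { -1, -1, 0 };

	rtt_sample(&st, 12);		/* 120 ms sample at HZ_EX = 100 */
	rtt_sample(&st, 20);
	printf("srtt=%ld rttvar=%ld rto=%ld\n", st.srtt, st.rttvar, st.rto);
	return 0;
}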
478
479static void ccid2_hc_tx_dec_pipe(struct sock *sk)
480{
481 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
482
483 hctx->ccid2hctx_pipe--;
484 BUG_ON(hctx->ccid2hctx_pipe < 0);
485
486 if (hctx->ccid2hctx_pipe == 0)
487 ccid2_hc_tx_kill_rto_timer(sk);
488}
489
490static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
491{
492 struct dccp_sock *dp = dccp_sk(sk);
493 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
494 u64 ackno, seqno;
495 struct ccid2_seq *seqp;
496 unsigned char *vector;
497 unsigned char veclen;
498 int offset = 0;
499 int done = 0;
500 int loss = 0;
501 unsigned int maxincr = 0;
502
503 ccid2_hc_tx_check_sanity(hctx);
504 /* check reverse path congestion */
505 seqno = DCCP_SKB_CB(skb)->dccpd_seq;
506
507 /* XXX this whole "algorithm" is broken. Need to fix it to keep track
508 * of the seqnos of the dupacks so that rpseq and rpdupack are correct
509 * -sorbo.
510 */
511 /* need to bootstrap */
512 if (hctx->ccid2hctx_rpdupack == -1) {
513 hctx->ccid2hctx_rpdupack = 0;
514 hctx->ccid2hctx_rpseq = seqno;
515 } else {
516 /* check if packet is consecutive */
517 if ((hctx->ccid2hctx_rpseq + 1) == seqno)
518 hctx->ccid2hctx_rpseq++;
519 /* it's a later packet */
520 else if (after48(seqno, hctx->ccid2hctx_rpseq)) {
521 hctx->ccid2hctx_rpdupack++;
522
523 /* check if we got enough dupacks */
524 if (hctx->ccid2hctx_rpdupack >=
525 hctx->ccid2hctx_numdupack) {
526 hctx->ccid2hctx_rpdupack = -1; /* XXX lame */
527 hctx->ccid2hctx_rpseq = 0;
528
529 ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio << 1);
530 }
531 }
532 }
533
534 /* check forward path congestion */
535 /* still didn't send out new data packets */
536 if (hctx->ccid2hctx_seqh == hctx->ccid2hctx_seqt)
537 return;
538
539 switch (DCCP_SKB_CB(skb)->dccpd_type) {
540 case DCCP_PKT_ACK:
541 case DCCP_PKT_DATAACK:
542 break;
543 default:
544 return;
545 }
546
547 ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
548 seqp = hctx->ccid2hctx_seqh->ccid2s_prev;
549
550 /* If in slow-start, cwnd can increase at most Ack Ratio / 2 packets for
551	 * this single ack; round up to be safe.
552 * -sorbo.
553 */
554 maxincr = dp->dccps_l_ack_ratio >> 1;
555 maxincr++;
556
557 /* go through all ack vectors */
558 while ((offset = ccid2_ackvector(sk, skb, offset,
559 &vector, &veclen)) != -1) {
560 /* go through this ack vector */
561 while (veclen--) {
562 const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK;
563 u64 ackno_end_rl;
564
565 dccp_set_seqno(&ackno_end_rl, ackno - rl);
566 ccid2_pr_debug("ackvec start:%llu end:%llu\n", ackno,
567 ackno_end_rl);
568 /* if the seqno we are analyzing is larger than the
569 * current ackno, then move towards the tail of our
570 * seqnos.
571 */
572 while (after48(seqp->ccid2s_seq, ackno)) {
573 if (seqp == hctx->ccid2hctx_seqt) {
574 done = 1;
575 break;
576 }
577 seqp = seqp->ccid2s_prev;
578 }
579 if (done)
580 break;
581
582 /* check all seqnos in the range of the vector
583 * run length
584 */
585			while (between48(seqp->ccid2s_seq, ackno_end_rl, ackno)) {
586 const u8 state = (*vector &
587 DCCP_ACKVEC_STATE_MASK) >> 6;
588
589 /* new packet received or marked */
590 if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED &&
591 !seqp->ccid2s_acked) {
592 if (state ==
593 DCCP_ACKVEC_STATE_ECN_MARKED) {
594 loss = 1;
595 } else
596 ccid2_new_ack(sk, seqp,
597 &maxincr);
598
599 seqp->ccid2s_acked = 1;
600 ccid2_pr_debug("Got ack for %llu\n",
601 seqp->ccid2s_seq);
602 ccid2_hc_tx_dec_pipe(sk);
603 }
604 if (seqp == hctx->ccid2hctx_seqt) {
605 done = 1;
606 break;
607 }
608 seqp = seqp->ccid2s_next;
609 }
610 if (done)
611 break;
612
613
614 dccp_set_seqno(&ackno, ackno_end_rl - 1);
615 vector++;
616 }
617 if (done)
618 break;
619 }
620
621 /* The state about what is acked should be correct now
622 * Check for NUMDUPACK
623 */
624 seqp = hctx->ccid2hctx_seqh->ccid2s_prev;
625 done = 0;
626 while (1) {
627 if (seqp->ccid2s_acked) {
628 done++;
629 if (done == hctx->ccid2hctx_numdupack)
630 break;
631 }
632 if (seqp == hctx->ccid2hctx_seqt)
633 break;
634 seqp = seqp->ccid2s_prev;
635 }
636
637	/* If there are at least numdupack (3 by default) acknowledgements,
638	 * anything unacknowledged below the newest of them is considered lost
639	 */
640 if (done == hctx->ccid2hctx_numdupack) {
641 struct ccid2_seq *last_acked = seqp;
642
643 /* check for lost packets */
644 while (1) {
645 if (!seqp->ccid2s_acked) {
646 loss = 1;
647 ccid2_hc_tx_dec_pipe(sk);
648 }
649 if (seqp == hctx->ccid2hctx_seqt)
650 break;
651 seqp = seqp->ccid2s_prev;
652 }
653
654 hctx->ccid2hctx_seqt = last_acked;
655 }
656
657 /* trim acked packets in tail */
658 while (hctx->ccid2hctx_seqt != hctx->ccid2hctx_seqh) {
659 if (!hctx->ccid2hctx_seqt->ccid2s_acked)
660 break;
661
662 hctx->ccid2hctx_seqt = hctx->ccid2hctx_seqt->ccid2s_next;
663 }
664
665 if (loss) {
666 /* XXX do bit shifts guarantee a 0 as the new bit? */
667 ccid2_change_cwnd(sk, hctx->ccid2hctx_cwnd >> 1);
668 hctx->ccid2hctx_ssthresh = hctx->ccid2hctx_cwnd;
669 if (hctx->ccid2hctx_ssthresh < 2)
670 hctx->ccid2hctx_ssthresh = 2;
671 }
672
673 ccid2_hc_tx_check_sanity(hctx);
674}
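
The loss-inference step in the middle of the function above reduces to: walking back from the newest entry, once numdupack acknowledged packets have been seen, every still-unacknowledged packet older than the newest of those acks is treated as lost. A toy model of just that rule over a plain array (NUMDUPACK_EX and the other names are invented; the real code walks the circular seqbuf list):

#include <stdio.h>

#define NUMDUPACK_EX 3			/* invented name; the sock field is numdupack */

static int inferred_losses(const int *acked, int n)
{
	int seen = 0, losses = 0, i;

	for (i = n - 1; i >= 0; i--)	/* newest entry first */
		if (acked[i] && ++seen == NUMDUPACK_EX)
			break;
	if (seen < NUMDUPACK_EX)
		return 0;		/* not enough acks to infer anything */
	for (i--; i >= 0; i--)		/* everything older than that ack */
		if (!acked[i])
			losses++;
	return losses;
}

int main(void)
{
	/* oldest ... newest: holes at the 2nd and 4th entries, three newer acks */
	const int acked[] = { 1, 0, 1, 0, 1, 1, 1 };

	printf("%d\n", inferred_losses(acked, 7));	/* prints 2 */
	return 0;
}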
675
676static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
677{
678 struct ccid2_hc_tx_sock *hctx = ccid_priv(ccid);
679 int seqcount = ccid2_seq_len;
680 int i;
681
682 /* XXX init variables with proper values */
683 hctx->ccid2hctx_cwnd = 1;
684 hctx->ccid2hctx_ssthresh = 10;
685 hctx->ccid2hctx_numdupack = 3;
686
687	/* XXX should be sized to roughly the window size... */
688 hctx->ccid2hctx_seqbuf = kmalloc(sizeof(*hctx->ccid2hctx_seqbuf) *
689 seqcount, gfp_any());
690 if (hctx->ccid2hctx_seqbuf == NULL)
691 return -ENOMEM;
692
693 for (i = 0; i < (seqcount - 1); i++) {
694 hctx->ccid2hctx_seqbuf[i].ccid2s_next =
695 &hctx->ccid2hctx_seqbuf[i + 1];
696 hctx->ccid2hctx_seqbuf[i + 1].ccid2s_prev =
697 &hctx->ccid2hctx_seqbuf[i];
698 }
699 hctx->ccid2hctx_seqbuf[seqcount - 1].ccid2s_next =
700 hctx->ccid2hctx_seqbuf;
701 hctx->ccid2hctx_seqbuf->ccid2s_prev =
702 &hctx->ccid2hctx_seqbuf[seqcount - 1];
703
704 hctx->ccid2hctx_seqh = hctx->ccid2hctx_seqbuf;
705 hctx->ccid2hctx_seqt = hctx->ccid2hctx_seqh;
706 hctx->ccid2hctx_sent = 0;
707 hctx->ccid2hctx_rto = 3 * HZ;
708 hctx->ccid2hctx_srtt = -1;
709 hctx->ccid2hctx_rttvar = -1;
710 hctx->ccid2hctx_lastrtt = 0;
711 hctx->ccid2hctx_rpdupack = -1;
712
713 hctx->ccid2hctx_rtotimer.function = &ccid2_hc_tx_rto_expire;
714 hctx->ccid2hctx_rtotimer.data = (unsigned long)sk;
715 init_timer(&hctx->ccid2hctx_rtotimer);
716
717 ccid2_hc_tx_check_sanity(hctx);
718 return 0;
719}
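
The buffer set up above is a circular, doubly linked ring built over one flat allocation, with the head and tail pointers starting at the same slot. A tiny standalone check of the same linking pattern (struct and variable names are made up for the example):

#include <stdio.h>

struct ring_entry {
	struct ring_entry *prev, *next;
};

int main(void)
{
	struct ring_entry buf[4];
	struct ring_entry *p;
	int i;

	for (i = 0; i < 3; i++) {		/* link neighbours both ways */
		buf[i].next = &buf[i + 1];
		buf[i + 1].prev = &buf[i];
	}
	buf[3].next = &buf[0];			/* close the ring */
	buf[0].prev = &buf[3];

	p = &buf[0];
	for (i = 0; i < 4; i++)			/* walking "next" 4 times wraps around */
		p = p->next;
	printf("%s\n", p == &buf[0] ? "ring closed" : "ring broken");
	return 0;
}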
720
721static void ccid2_hc_tx_exit(struct sock *sk)
722{
723 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
724
725 ccid2_hc_tx_kill_rto_timer(sk);
726 kfree(hctx->ccid2hctx_seqbuf);
727 hctx->ccid2hctx_seqbuf = NULL;
728}
729
730static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
731{
732 const struct dccp_sock *dp = dccp_sk(sk);
733 struct ccid2_hc_rx_sock *hcrx = ccid2_hc_rx_sk(sk);
734
735 switch (DCCP_SKB_CB(skb)->dccpd_type) {
736 case DCCP_PKT_DATA:
737 case DCCP_PKT_DATAACK:
738 hcrx->ccid2hcrx_data++;
739 if (hcrx->ccid2hcrx_data >= dp->dccps_r_ack_ratio) {
740 dccp_send_ack(sk);
741 hcrx->ccid2hcrx_data = 0;
742 }
743 break;
744 }
745}
746
747static struct ccid_operations ccid2 = {
748 .ccid_id = 2,
749 .ccid_name = "ccid2",
750 .ccid_owner = THIS_MODULE,
751 .ccid_hc_tx_obj_size = sizeof(struct ccid2_hc_tx_sock),
752 .ccid_hc_tx_init = ccid2_hc_tx_init,
753 .ccid_hc_tx_exit = ccid2_hc_tx_exit,
754 .ccid_hc_tx_send_packet = ccid2_hc_tx_send_packet,
755 .ccid_hc_tx_packet_sent = ccid2_hc_tx_packet_sent,
756 .ccid_hc_tx_packet_recv = ccid2_hc_tx_packet_recv,
757 .ccid_hc_rx_obj_size = sizeof(struct ccid2_hc_rx_sock),
758 .ccid_hc_rx_packet_recv = ccid2_hc_rx_packet_recv,
759};
760
761module_param(ccid2_debug, int, 0444);
762MODULE_PARM_DESC(ccid2_debug, "Enable debug messages");
763
764static __init int ccid2_module_init(void)
765{
766 return ccid_register(&ccid2);
767}
768module_init(ccid2_module_init);
769
770static __exit void ccid2_module_exit(void)
771{
772 ccid_unregister(&ccid2);
773}
774module_exit(ccid2_module_exit);
775
776MODULE_AUTHOR("Andrea Bittau <a.bittau@cs.ucl.ac.uk>");
777MODULE_DESCRIPTION("DCCP TCP-Like (CCID2) CCID");
778MODULE_LICENSE("GPL");
779MODULE_ALIAS("net-dccp-ccid-2");
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
new file mode 100644
index 000000000000..451a87464fa5
--- /dev/null
+++ b/net/dccp/ccids/ccid2.h
@@ -0,0 +1,85 @@
1/*
2 * net/dccp/ccids/ccid2.h
3 *
4 * Copyright (c) 2005 Andrea Bittau <a.bittau@cs.ucl.ac.uk>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#ifndef _DCCP_CCID2_H_
21#define _DCCP_CCID2_H_
22
23#include <linux/dccp.h>
24#include <linux/timer.h>
25#include <linux/types.h>
26#include "../ccid.h"
27
28struct sock;
29
30struct ccid2_seq {
31 u64 ccid2s_seq;
32 unsigned long ccid2s_sent;
33 int ccid2s_acked;
34 struct ccid2_seq *ccid2s_prev;
35 struct ccid2_seq *ccid2s_next;
36};
37
38/** struct ccid2_hc_tx_sock - CCID2 TX half connection
39 *
40 * @ccid2hctx_ssacks - ACKs recv in slow start
 41 * @ccid2hctx_acks - ACKs recv in AI phase
 42 * @ccid2hctx_sent - packets sent in this window
 43 * @ccid2hctx_lastrtt - time RTT was last measured
44 * @ccid2hctx_arsent - packets sent [ack ratio]
45 * @ccid2hctx_ackloss - ack was lost in this win
46 * @ccid2hctx_rpseq - last consecutive seqno
47 * @ccid2hctx_rpdupack - dupacks since rpseq
48*/
49struct ccid2_hc_tx_sock {
50 int ccid2hctx_cwnd;
51 int ccid2hctx_ssacks;
52 int ccid2hctx_acks;
53 int ccid2hctx_ssthresh;
54 int ccid2hctx_pipe;
55 int ccid2hctx_numdupack;
56 struct ccid2_seq *ccid2hctx_seqbuf;
57 struct ccid2_seq *ccid2hctx_seqh;
58 struct ccid2_seq *ccid2hctx_seqt;
59 long ccid2hctx_rto;
60 long ccid2hctx_srtt;
61 long ccid2hctx_rttvar;
62 int ccid2hctx_sent;
63 unsigned long ccid2hctx_lastrtt;
64 struct timer_list ccid2hctx_rtotimer;
65 unsigned long ccid2hctx_arsent;
66 int ccid2hctx_ackloss;
67 u64 ccid2hctx_rpseq;
68 int ccid2hctx_rpdupack;
69 int ccid2hctx_sendwait;
70};
71
72struct ccid2_hc_rx_sock {
73 int ccid2hcrx_data;
74};
75
76static inline struct ccid2_hc_tx_sock *ccid2_hc_tx_sk(const struct sock *sk)
77{
78 return ccid_priv(dccp_sk(sk)->dccps_hc_tx_ccid);
79}
80
81static inline struct ccid2_hc_rx_sock *ccid2_hc_rx_sk(const struct sock *sk)
82{
83 return ccid_priv(dccp_sk(sk)->dccps_hc_rx_ccid);
84}
85#endif /* _DCCP_CCID2_H_ */
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 35d1d347541c..b4a51d0355a5 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -46,7 +46,7 @@
46 * Reason for maths here is to avoid 32 bit overflow when a is big. 46 * Reason for maths here is to avoid 32 bit overflow when a is big.
47 * With this we get close to the limit. 47 * With this we get close to the limit.
48 */ 48 */
49static inline u32 usecs_div(const u32 a, const u32 b) 49static u32 usecs_div(const u32 a, const u32 b)
50{ 50{
51 const u32 div = a < (UINT_MAX / (USEC_PER_SEC / 10)) ? 10 : 51 const u32 div = a < (UINT_MAX / (USEC_PER_SEC / 10)) ? 10 :
52 a < (UINT_MAX / (USEC_PER_SEC / 50)) ? 50 : 52 a < (UINT_MAX / (USEC_PER_SEC / 50)) ? 50 :
@@ -76,15 +76,6 @@ static struct dccp_tx_hist *ccid3_tx_hist;
76static struct dccp_rx_hist *ccid3_rx_hist; 76static struct dccp_rx_hist *ccid3_rx_hist;
77static struct dccp_li_hist *ccid3_li_hist; 77static struct dccp_li_hist *ccid3_li_hist;
78 78
79static int ccid3_init(struct sock *sk)
80{
81 return 0;
82}
83
84static void ccid3_exit(struct sock *sk)
85{
86}
87
88/* TFRC sender states */ 79/* TFRC sender states */
89enum ccid3_hc_tx_states { 80enum ccid3_hc_tx_states {
90 TFRC_SSTATE_NO_SENT = 1, 81 TFRC_SSTATE_NO_SENT = 1,
@@ -107,8 +98,8 @@ static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state)
107} 98}
108#endif 99#endif
109 100
110static inline void ccid3_hc_tx_set_state(struct sock *sk, 101static void ccid3_hc_tx_set_state(struct sock *sk,
111 enum ccid3_hc_tx_states state) 102 enum ccid3_hc_tx_states state)
112{ 103{
113 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); 104 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
114 enum ccid3_hc_tx_states oldstate = hctx->ccid3hctx_state; 105 enum ccid3_hc_tx_states oldstate = hctx->ccid3hctx_state;
@@ -316,8 +307,6 @@ static int ccid3_hc_tx_send_packet(struct sock *sk,
316 307
317 switch (hctx->ccid3hctx_state) { 308 switch (hctx->ccid3hctx_state) {
318 case TFRC_SSTATE_NO_SENT: 309 case TFRC_SSTATE_NO_SENT:
319 hctx->ccid3hctx_no_feedback_timer.function = ccid3_hc_tx_no_feedback_timer;
320 hctx->ccid3hctx_no_feedback_timer.data = (unsigned long)sk;
321 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, 310 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
322 jiffies + usecs_to_jiffies(TFRC_INITIAL_TIMEOUT)); 311 jiffies + usecs_to_jiffies(TFRC_INITIAL_TIMEOUT));
323 hctx->ccid3hctx_last_win_count = 0; 312 hctx->ccid3hctx_last_win_count = 0;
@@ -585,16 +574,15 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
585 } 574 }
586} 575}
587 576
588static void ccid3_hc_tx_insert_options(struct sock *sk, struct sk_buff *skb) 577static int ccid3_hc_tx_insert_options(struct sock *sk, struct sk_buff *skb)
589{ 578{
590 const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); 579 const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
591 580
592 BUG_ON(hctx == NULL); 581 BUG_ON(hctx == NULL);
593 582
594 if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)) 583 if (sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)
595 return; 584 DCCP_SKB_CB(skb)->dccpd_ccval = hctx->ccid3hctx_last_win_count;
596 585 return 0;
597 DCCP_SKB_CB(skb)->dccpd_ccval = hctx->ccid3hctx_last_win_count;
598} 586}
599 587
600static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option, 588static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
@@ -626,7 +614,7 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
626 __FUNCTION__, dccp_role(sk), sk); 614 __FUNCTION__, dccp_role(sk), sk);
627 rc = -EINVAL; 615 rc = -EINVAL;
628 } else { 616 } else {
629 opt_recv->ccid3or_loss_event_rate = ntohl(*(u32 *)value); 617 opt_recv->ccid3or_loss_event_rate = ntohl(*(__be32 *)value);
630 ccid3_pr_debug("%s, sk=%p, LOSS_EVENT_RATE=%u\n", 618 ccid3_pr_debug("%s, sk=%p, LOSS_EVENT_RATE=%u\n",
631 dccp_role(sk), sk, 619 dccp_role(sk), sk,
632 opt_recv->ccid3or_loss_event_rate); 620 opt_recv->ccid3or_loss_event_rate);
@@ -647,7 +635,7 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
647 __FUNCTION__, dccp_role(sk), sk); 635 __FUNCTION__, dccp_role(sk), sk);
648 rc = -EINVAL; 636 rc = -EINVAL;
649 } else { 637 } else {
650 opt_recv->ccid3or_receive_rate = ntohl(*(u32 *)value); 638 opt_recv->ccid3or_receive_rate = ntohl(*(__be32 *)value);
651 ccid3_pr_debug("%s, sk=%p, RECEIVE_RATE=%u\n", 639 ccid3_pr_debug("%s, sk=%p, RECEIVE_RATE=%u\n",
652 dccp_role(sk), sk, 640 dccp_role(sk), sk,
653 opt_recv->ccid3or_receive_rate); 641 opt_recv->ccid3or_receive_rate);
@@ -658,17 +646,10 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
658 return rc; 646 return rc;
659} 647}
660 648
661static int ccid3_hc_tx_init(struct sock *sk) 649static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk)
662{ 650{
663 struct dccp_sock *dp = dccp_sk(sk); 651 struct dccp_sock *dp = dccp_sk(sk);
664 struct ccid3_hc_tx_sock *hctx; 652 struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid);
665
666 dp->dccps_hc_tx_ccid_private = kmalloc(sizeof(*hctx), gfp_any());
667 if (dp->dccps_hc_tx_ccid_private == NULL)
668 return -ENOMEM;
669
670 hctx = ccid3_hc_tx_sk(sk);
671 memset(hctx, 0, sizeof(*hctx));
672 653
673 if (dp->dccps_packet_size >= TFRC_MIN_PACKET_SIZE && 654 if (dp->dccps_packet_size >= TFRC_MIN_PACKET_SIZE &&
674 dp->dccps_packet_size <= TFRC_MAX_PACKET_SIZE) 655 dp->dccps_packet_size <= TFRC_MAX_PACKET_SIZE)
@@ -681,6 +662,9 @@ static int ccid3_hc_tx_init(struct sock *sk)
681 hctx->ccid3hctx_t_rto = USEC_PER_SEC; 662 hctx->ccid3hctx_t_rto = USEC_PER_SEC;
682 hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT; 663 hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT;
683 INIT_LIST_HEAD(&hctx->ccid3hctx_hist); 664 INIT_LIST_HEAD(&hctx->ccid3hctx_hist);
665
666 hctx->ccid3hctx_no_feedback_timer.function = ccid3_hc_tx_no_feedback_timer;
667 hctx->ccid3hctx_no_feedback_timer.data = (unsigned long)sk;
684 init_timer(&hctx->ccid3hctx_no_feedback_timer); 668 init_timer(&hctx->ccid3hctx_no_feedback_timer);
685 669
686 return 0; 670 return 0;
@@ -688,7 +672,6 @@ static int ccid3_hc_tx_init(struct sock *sk)
688 672
689static void ccid3_hc_tx_exit(struct sock *sk) 673static void ccid3_hc_tx_exit(struct sock *sk)
690{ 674{
691 struct dccp_sock *dp = dccp_sk(sk);
692 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); 675 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
693 676
694 BUG_ON(hctx == NULL); 677 BUG_ON(hctx == NULL);
@@ -698,9 +681,6 @@ static void ccid3_hc_tx_exit(struct sock *sk)
698 681
699 /* Empty packet history */ 682 /* Empty packet history */
700 dccp_tx_hist_purge(ccid3_tx_hist, &hctx->ccid3hctx_hist); 683 dccp_tx_hist_purge(ccid3_tx_hist, &hctx->ccid3hctx_hist);
701
702 kfree(dp->dccps_hc_tx_ccid_private);
703 dp->dccps_hc_tx_ccid_private = NULL;
704} 684}
705 685
706/* 686/*
@@ -727,8 +707,8 @@ static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state)
727} 707}
728#endif 708#endif
729 709
730static inline void ccid3_hc_rx_set_state(struct sock *sk, 710static void ccid3_hc_rx_set_state(struct sock *sk,
731 enum ccid3_hc_rx_states state) 711 enum ccid3_hc_rx_states state)
732{ 712{
733 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); 713 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
734 enum ccid3_hc_rx_states oldstate = hcrx->ccid3hcrx_state; 714 enum ccid3_hc_rx_states oldstate = hcrx->ccid3hcrx_state;
@@ -793,31 +773,35 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk)
793 dccp_send_ack(sk); 773 dccp_send_ack(sk);
794} 774}
795 775
796static void ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb) 776static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
797{ 777{
798 const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); 778 const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
799 u32 x_recv, pinv; 779 __be32 x_recv, pinv;
800 780
801 BUG_ON(hcrx == NULL); 781 BUG_ON(hcrx == NULL);
802 782
803 if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)) 783 if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN))
804 return; 784 return 0;
805 785
806 DCCP_SKB_CB(skb)->dccpd_ccval = hcrx->ccid3hcrx_last_counter; 786 DCCP_SKB_CB(skb)->dccpd_ccval = hcrx->ccid3hcrx_last_counter;
807 787
808 if (dccp_packet_without_ack(skb)) 788 if (dccp_packet_without_ack(skb))
809 return; 789 return 0;
810 790
811 if (hcrx->ccid3hcrx_elapsed_time != 0)
812 dccp_insert_option_elapsed_time(sk, skb,
813 hcrx->ccid3hcrx_elapsed_time);
814 dccp_insert_option_timestamp(sk, skb);
815 x_recv = htonl(hcrx->ccid3hcrx_x_recv); 791 x_recv = htonl(hcrx->ccid3hcrx_x_recv);
816 pinv = htonl(hcrx->ccid3hcrx_pinv); 792 pinv = htonl(hcrx->ccid3hcrx_pinv);
817 dccp_insert_option(sk, skb, TFRC_OPT_LOSS_EVENT_RATE, 793
818 &pinv, sizeof(pinv)); 794 if ((hcrx->ccid3hcrx_elapsed_time != 0 &&
819 dccp_insert_option(sk, skb, TFRC_OPT_RECEIVE_RATE, 795 dccp_insert_option_elapsed_time(sk, skb,
820 &x_recv, sizeof(x_recv)); 796 hcrx->ccid3hcrx_elapsed_time)) ||
797 dccp_insert_option_timestamp(sk, skb) ||
798 dccp_insert_option(sk, skb, TFRC_OPT_LOSS_EVENT_RATE,
799 &pinv, sizeof(pinv)) ||
800 dccp_insert_option(sk, skb, TFRC_OPT_RECEIVE_RATE,
801 &x_recv, sizeof(x_recv)))
802 return -1;
803
804 return 0;
821} 805}
822 806
823/* calculate first loss interval 807/* calculate first loss interval
@@ -1047,20 +1031,13 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
1047 } 1031 }
1048} 1032}
1049 1033
1050static int ccid3_hc_rx_init(struct sock *sk) 1034static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk)
1051{ 1035{
1052 struct dccp_sock *dp = dccp_sk(sk); 1036 struct dccp_sock *dp = dccp_sk(sk);
1053 struct ccid3_hc_rx_sock *hcrx; 1037 struct ccid3_hc_rx_sock *hcrx = ccid_priv(ccid);
1054 1038
1055 ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk); 1039 ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);
1056 1040
1057 dp->dccps_hc_rx_ccid_private = kmalloc(sizeof(*hcrx), gfp_any());
1058 if (dp->dccps_hc_rx_ccid_private == NULL)
1059 return -ENOMEM;
1060
1061 hcrx = ccid3_hc_rx_sk(sk);
1062 memset(hcrx, 0, sizeof(*hcrx));
1063
1064 if (dp->dccps_packet_size >= TFRC_MIN_PACKET_SIZE && 1041 if (dp->dccps_packet_size >= TFRC_MIN_PACKET_SIZE &&
1065 dp->dccps_packet_size <= TFRC_MAX_PACKET_SIZE) 1042 dp->dccps_packet_size <= TFRC_MAX_PACKET_SIZE)
1066 hcrx->ccid3hcrx_s = dp->dccps_packet_size; 1043 hcrx->ccid3hcrx_s = dp->dccps_packet_size;
@@ -1079,7 +1056,6 @@ static int ccid3_hc_rx_init(struct sock *sk)
1079static void ccid3_hc_rx_exit(struct sock *sk) 1056static void ccid3_hc_rx_exit(struct sock *sk)
1080{ 1057{
1081 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); 1058 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
1082 struct dccp_sock *dp = dccp_sk(sk);
1083 1059
1084 BUG_ON(hcrx == NULL); 1060 BUG_ON(hcrx == NULL);
1085 1061
@@ -1090,9 +1066,6 @@ static void ccid3_hc_rx_exit(struct sock *sk)
1090 1066
1091 /* Empty loss interval history */ 1067 /* Empty loss interval history */
1092 dccp_li_hist_purge(ccid3_li_hist, &hcrx->ccid3hcrx_li_hist); 1068 dccp_li_hist_purge(ccid3_li_hist, &hcrx->ccid3hcrx_li_hist);
1093
1094 kfree(dp->dccps_hc_rx_ccid_private);
1095 dp->dccps_hc_rx_ccid_private = NULL;
1096} 1069}
1097 1070
1098static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info) 1071static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info)
@@ -1178,12 +1151,11 @@ static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
1178 return 0; 1151 return 0;
1179} 1152}
1180 1153
1181static struct ccid ccid3 = { 1154static struct ccid_operations ccid3 = {
1182 .ccid_id = 3, 1155 .ccid_id = 3,
1183 .ccid_name = "ccid3", 1156 .ccid_name = "ccid3",
1184 .ccid_owner = THIS_MODULE, 1157 .ccid_owner = THIS_MODULE,
1185 .ccid_init = ccid3_init, 1158 .ccid_hc_tx_obj_size = sizeof(struct ccid3_hc_tx_sock),
1186 .ccid_exit = ccid3_exit,
1187 .ccid_hc_tx_init = ccid3_hc_tx_init, 1159 .ccid_hc_tx_init = ccid3_hc_tx_init,
1188 .ccid_hc_tx_exit = ccid3_hc_tx_exit, 1160 .ccid_hc_tx_exit = ccid3_hc_tx_exit,
1189 .ccid_hc_tx_send_packet = ccid3_hc_tx_send_packet, 1161 .ccid_hc_tx_send_packet = ccid3_hc_tx_send_packet,
@@ -1191,6 +1163,7 @@ static struct ccid ccid3 = {
1191 .ccid_hc_tx_packet_recv = ccid3_hc_tx_packet_recv, 1163 .ccid_hc_tx_packet_recv = ccid3_hc_tx_packet_recv,
1192 .ccid_hc_tx_insert_options = ccid3_hc_tx_insert_options, 1164 .ccid_hc_tx_insert_options = ccid3_hc_tx_insert_options,
1193 .ccid_hc_tx_parse_options = ccid3_hc_tx_parse_options, 1165 .ccid_hc_tx_parse_options = ccid3_hc_tx_parse_options,
1166 .ccid_hc_rx_obj_size = sizeof(struct ccid3_hc_rx_sock),
1194 .ccid_hc_rx_init = ccid3_hc_rx_init, 1167 .ccid_hc_rx_init = ccid3_hc_rx_init,
1195 .ccid_hc_rx_exit = ccid3_hc_rx_exit, 1168 .ccid_hc_rx_exit = ccid3_hc_rx_exit,
1196 .ccid_hc_rx_insert_options = ccid3_hc_rx_insert_options, 1169 .ccid_hc_rx_insert_options = ccid3_hc_rx_insert_options,
@@ -1241,15 +1214,6 @@ module_init(ccid3_module_init);
1241 1214
1242static __exit void ccid3_module_exit(void) 1215static __exit void ccid3_module_exit(void)
1243{ 1216{
1244#ifdef CONFIG_IP_DCCP_UNLOAD_HACK
1245 /*
1246 * Hack to use while developing, so that we get rid of the control
1247 * sock, that is what keeps a refcount on dccp.ko -acme
1248 */
1249 extern void dccp_ctl_sock_exit(void);
1250
1251 dccp_ctl_sock_exit();
1252#endif
1253 ccid_unregister(&ccid3); 1217 ccid_unregister(&ccid3);
1254 1218
1255 if (ccid3_tx_hist != NULL) { 1219 if (ccid3_tx_hist != NULL) {
diff --git a/net/dccp/ccids/ccid3.h b/net/dccp/ccids/ccid3.h
index 0bde4583d091..f18b96d4e5a2 100644
--- a/net/dccp/ccids/ccid3.h
+++ b/net/dccp/ccids/ccid3.h
@@ -41,6 +41,7 @@
41#include <linux/time.h> 41#include <linux/time.h>
42#include <linux/types.h> 42#include <linux/types.h>
43#include <linux/tfrc.h> 43#include <linux/tfrc.h>
44#include "../ccid.h"
44 45
45#define TFRC_MIN_PACKET_SIZE 16 46#define TFRC_MIN_PACKET_SIZE 16
46#define TFRC_STD_PACKET_SIZE 256 47#define TFRC_STD_PACKET_SIZE 256
@@ -135,12 +136,12 @@ struct ccid3_hc_rx_sock {
135 136
136static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk) 137static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk)
137{ 138{
138 return dccp_sk(sk)->dccps_hc_tx_ccid_private; 139 return ccid_priv(dccp_sk(sk)->dccps_hc_tx_ccid);
139} 140}
140 141
141static inline struct ccid3_hc_rx_sock *ccid3_hc_rx_sk(const struct sock *sk) 142static inline struct ccid3_hc_rx_sock *ccid3_hc_rx_sk(const struct sock *sk)
142{ 143{
143 return dccp_sk(sk)->dccps_hc_rx_ccid_private; 144 return ccid_priv(dccp_sk(sk)->dccps_hc_rx_ccid);
144} 145}
145 146
146#endif /* _DCCP_CCID3_H_ */ 147#endif /* _DCCP_CCID3_H_ */
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 93f26dd6e6cb..1fe509148689 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -59,8 +59,6 @@ extern void dccp_time_wait(struct sock *sk, int state, int timeo);
59 59
60#define DCCP_RTO_MAX ((unsigned)(120 * HZ)) /* FIXME: using TCP value */ 60#define DCCP_RTO_MAX ((unsigned)(120 * HZ)) /* FIXME: using TCP value */
61 61
62extern struct proto dccp_prot;
63
64/* is seq1 < seq2 ? */ 62/* is seq1 < seq2 ? */
65static inline int before48(const u64 seq1, const u64 seq2) 63static inline int before48(const u64 seq1, const u64 seq2)
66{ 64{
@@ -120,7 +118,6 @@ DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics);
120 118
121extern int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb); 119extern int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb);
122 120
123extern int dccp_send_response(struct sock *sk);
124extern void dccp_send_ack(struct sock *sk); 121extern void dccp_send_ack(struct sock *sk);
125extern void dccp_send_delayed_ack(struct sock *sk); 122extern void dccp_send_delayed_ack(struct sock *sk);
126extern void dccp_send_sync(struct sock *sk, const u64 seq, 123extern void dccp_send_sync(struct sock *sk, const u64 seq,
@@ -140,53 +137,8 @@ extern unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu);
140extern const char *dccp_packet_name(const int type); 137extern const char *dccp_packet_name(const int type);
141extern const char *dccp_state_name(const int state); 138extern const char *dccp_state_name(const int state);
142 139
143static inline void dccp_set_state(struct sock *sk, const int state) 140extern void dccp_set_state(struct sock *sk, const int state);
144{ 141extern void dccp_done(struct sock *sk);
145 const int oldstate = sk->sk_state;
146
147 dccp_pr_debug("%s(%p) %-10.10s -> %s\n",
148 dccp_role(sk), sk,
149 dccp_state_name(oldstate), dccp_state_name(state));
150 WARN_ON(state == oldstate);
151
152 switch (state) {
153 case DCCP_OPEN:
154 if (oldstate != DCCP_OPEN)
155 DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
156 break;
157
158 case DCCP_CLOSED:
159 if (oldstate == DCCP_CLOSING || oldstate == DCCP_OPEN)
160 DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);
161
162 sk->sk_prot->unhash(sk);
163 if (inet_csk(sk)->icsk_bind_hash != NULL &&
164 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
165 inet_put_port(&dccp_hashinfo, sk);
166 /* fall through */
167 default:
168 if (oldstate == DCCP_OPEN)
169 DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
170 }
171
172 /* Change state AFTER socket is unhashed to avoid closed
173 * socket sitting in hash tables.
174 */
175 sk->sk_state = state;
176}
177
178static inline void dccp_done(struct sock *sk)
179{
180 dccp_set_state(sk, DCCP_CLOSED);
181 dccp_clear_xmit_timers(sk);
182
183 sk->sk_shutdown = SHUTDOWN_MASK;
184
185 if (!sock_flag(sk, SOCK_DEAD))
186 sk->sk_state_change(sk);
187 else
188 inet_csk_destroy_sock(sk);
189}
190 142
191static inline void dccp_openreq_init(struct request_sock *req, 143static inline void dccp_openreq_init(struct request_sock *req,
192 struct dccp_sock *dp, 144 struct dccp_sock *dp,
@@ -209,10 +161,6 @@ extern struct sock *dccp_create_openreq_child(struct sock *sk,
209 161
210extern int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb); 162extern int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
211 163
212extern void dccp_v4_err(struct sk_buff *skb, u32);
213
214extern int dccp_v4_rcv(struct sk_buff *skb);
215
216extern struct sock *dccp_v4_request_recv_sock(struct sock *sk, 164extern struct sock *dccp_v4_request_recv_sock(struct sock *sk,
217 struct sk_buff *skb, 165 struct sk_buff *skb,
218 struct request_sock *req, 166 struct request_sock *req,
@@ -228,24 +176,30 @@ extern int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
228extern int dccp_rcv_established(struct sock *sk, struct sk_buff *skb, 176extern int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
229 const struct dccp_hdr *dh, const unsigned len); 177 const struct dccp_hdr *dh, const unsigned len);
230 178
231extern int dccp_v4_init_sock(struct sock *sk); 179extern int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
232extern int dccp_v4_destroy_sock(struct sock *sk); 180extern int dccp_destroy_sock(struct sock *sk);
233 181
234extern void dccp_close(struct sock *sk, long timeout); 182extern void dccp_close(struct sock *sk, long timeout);
235extern struct sk_buff *dccp_make_response(struct sock *sk, 183extern struct sk_buff *dccp_make_response(struct sock *sk,
236 struct dst_entry *dst, 184 struct dst_entry *dst,
237 struct request_sock *req); 185 struct request_sock *req);
238extern struct sk_buff *dccp_make_reset(struct sock *sk,
239 struct dst_entry *dst,
240 enum dccp_reset_codes code);
241 186
242extern int dccp_connect(struct sock *sk); 187extern int dccp_connect(struct sock *sk);
243extern int dccp_disconnect(struct sock *sk, int flags); 188extern int dccp_disconnect(struct sock *sk, int flags);
189extern void dccp_hash(struct sock *sk);
244extern void dccp_unhash(struct sock *sk); 190extern void dccp_unhash(struct sock *sk);
245extern int dccp_getsockopt(struct sock *sk, int level, int optname, 191extern int dccp_getsockopt(struct sock *sk, int level, int optname,
246 char __user *optval, int __user *optlen); 192 char __user *optval, int __user *optlen);
247extern int dccp_setsockopt(struct sock *sk, int level, int optname, 193extern int dccp_setsockopt(struct sock *sk, int level, int optname,
248 char __user *optval, int optlen); 194 char __user *optval, int optlen);
195#ifdef CONFIG_COMPAT
196extern int compat_dccp_getsockopt(struct sock *sk,
197 int level, int optname,
198 char __user *optval, int __user *optlen);
199extern int compat_dccp_setsockopt(struct sock *sk,
200 int level, int optname,
201 char __user *optval, int optlen);
202#endif
249extern int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg); 203extern int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg);
250extern int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, 204extern int dccp_sendmsg(struct kiocb *iocb, struct sock *sk,
251 struct msghdr *msg, size_t size); 205 struct msghdr *msg, size_t size);
@@ -262,15 +216,14 @@ extern int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
262 int addr_len); 216 int addr_len);
263 217
264extern int dccp_v4_checksum(const struct sk_buff *skb, 218extern int dccp_v4_checksum(const struct sk_buff *skb,
265 const u32 saddr, const u32 daddr); 219 const __be32 saddr, const __be32 daddr);
266 220
267extern int dccp_v4_send_reset(struct sock *sk, 221extern int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code);
268 enum dccp_reset_codes code);
269extern void dccp_send_close(struct sock *sk, const int active); 222extern void dccp_send_close(struct sock *sk, const int active);
270extern int dccp_invalid_packet(struct sk_buff *skb); 223extern int dccp_invalid_packet(struct sk_buff *skb);
271 224
272static inline int dccp_bad_service_code(const struct sock *sk, 225static inline int dccp_bad_service_code(const struct sock *sk,
273 const __u32 service) 226 const __be32 service)
274{ 227{
275 const struct dccp_sock *dp = dccp_sk(sk); 228 const struct dccp_sock *dp = dccp_sk(sk);
276 229
@@ -334,41 +287,29 @@ static inline void dccp_hdr_set_seq(struct dccp_hdr *dh, const u64 gss)
334{ 287{
335 struct dccp_hdr_ext *dhx = (struct dccp_hdr_ext *)((void *)dh + 288 struct dccp_hdr_ext *dhx = (struct dccp_hdr_ext *)((void *)dh +
336 sizeof(*dh)); 289 sizeof(*dh));
337 290 dh->dccph_seq2 = 0;
338#if defined(__LITTLE_ENDIAN_BITFIELD) 291 dh->dccph_seq = htons((gss >> 32) & 0xfffff);
339 dh->dccph_seq = htonl((gss >> 32)) >> 8;
340#elif defined(__BIG_ENDIAN_BITFIELD)
341 dh->dccph_seq = htonl((gss >> 32));
342#else
343#error "Adjust your <asm/byteorder.h> defines"
344#endif
345 dhx->dccph_seq_low = htonl(gss & 0xffffffff); 292 dhx->dccph_seq_low = htonl(gss & 0xffffffff);
346} 293}
347 294
348static inline void dccp_hdr_set_ack(struct dccp_hdr_ack_bits *dhack, 295static inline void dccp_hdr_set_ack(struct dccp_hdr_ack_bits *dhack,
349 const u64 gsr) 296 const u64 gsr)
350{ 297{
351#if defined(__LITTLE_ENDIAN_BITFIELD) 298 dhack->dccph_reserved1 = 0;
352 dhack->dccph_ack_nr_high = htonl((gsr >> 32)) >> 8; 299 dhack->dccph_ack_nr_high = htons(gsr >> 32);
353#elif defined(__BIG_ENDIAN_BITFIELD)
354 dhack->dccph_ack_nr_high = htonl((gsr >> 32));
355#else
356#error "Adjust your <asm/byteorder.h> defines"
357#endif
358 dhack->dccph_ack_nr_low = htonl(gsr & 0xffffffff); 300 dhack->dccph_ack_nr_low = htonl(gsr & 0xffffffff);
359} 301}
360 302
361static inline void dccp_update_gsr(struct sock *sk, u64 seq) 303static inline void dccp_update_gsr(struct sock *sk, u64 seq)
362{ 304{
363 struct dccp_sock *dp = dccp_sk(sk); 305 struct dccp_sock *dp = dccp_sk(sk);
306 const struct dccp_minisock *dmsk = dccp_msk(sk);
364 307
365 dp->dccps_gsr = seq; 308 dp->dccps_gsr = seq;
366 dccp_set_seqno(&dp->dccps_swl, 309 dccp_set_seqno(&dp->dccps_swl,
367 (dp->dccps_gsr + 1 - 310 dp->dccps_gsr + 1 - (dmsk->dccpms_sequence_window / 4));
368 (dp->dccps_options.dccpo_sequence_window / 4)));
369 dccp_set_seqno(&dp->dccps_swh, 311 dccp_set_seqno(&dp->dccps_swh,
370 (dp->dccps_gsr + 312 dp->dccps_gsr + (3 * dmsk->dccpms_sequence_window) / 4);
371 (3 * dp->dccps_options.dccpo_sequence_window) / 4));
372} 313}
373 314
374static inline void dccp_update_gss(struct sock *sk, u64 seq) 315static inline void dccp_update_gss(struct sock *sk, u64 seq)
@@ -378,7 +319,7 @@ static inline void dccp_update_gss(struct sock *sk, u64 seq)
378 dp->dccps_awh = dp->dccps_gss = seq; 319 dp->dccps_awh = dp->dccps_gss = seq;
379 dccp_set_seqno(&dp->dccps_awl, 320 dccp_set_seqno(&dp->dccps_awl,
380 (dp->dccps_gss - 321 (dp->dccps_gss -
381 dp->dccps_options.dccpo_sequence_window + 1)); 322 dccp_msk(sk)->dccpms_sequence_window + 1));
382} 323}
383 324
384static inline int dccp_ack_pending(const struct sock *sk) 325static inline int dccp_ack_pending(const struct sock *sk)
@@ -386,24 +327,22 @@ static inline int dccp_ack_pending(const struct sock *sk)
386 const struct dccp_sock *dp = dccp_sk(sk); 327 const struct dccp_sock *dp = dccp_sk(sk);
387 return dp->dccps_timestamp_echo != 0 || 328 return dp->dccps_timestamp_echo != 0 ||
388#ifdef CONFIG_IP_DCCP_ACKVEC 329#ifdef CONFIG_IP_DCCP_ACKVEC
389 (dp->dccps_options.dccpo_send_ack_vector && 330 (dccp_msk(sk)->dccpms_send_ack_vector &&
390 dccp_ackvec_pending(dp->dccps_hc_rx_ackvec)) || 331 dccp_ackvec_pending(dp->dccps_hc_rx_ackvec)) ||
391#endif 332#endif
392 inet_csk_ack_scheduled(sk); 333 inet_csk_ack_scheduled(sk);
393} 334}
394 335
395extern void dccp_insert_options(struct sock *sk, struct sk_buff *skb); 336extern int dccp_insert_options(struct sock *sk, struct sk_buff *skb);
396extern void dccp_insert_option_elapsed_time(struct sock *sk, 337extern int dccp_insert_option_elapsed_time(struct sock *sk,
397 struct sk_buff *skb, 338 struct sk_buff *skb,
398 u32 elapsed_time); 339 u32 elapsed_time);
399extern void dccp_insert_option_timestamp(struct sock *sk, 340extern int dccp_insert_option_timestamp(struct sock *sk,
400 struct sk_buff *skb); 341 struct sk_buff *skb);
401extern void dccp_insert_option(struct sock *sk, struct sk_buff *skb, 342extern int dccp_insert_option(struct sock *sk, struct sk_buff *skb,
402 unsigned char option, 343 unsigned char option,
403 const void *value, unsigned char len); 344 const void *value, unsigned char len);
404 345
405extern struct socket *dccp_ctl_socket;
406
407extern void dccp_timestamp(const struct sock *sk, struct timeval *tv); 346extern void dccp_timestamp(const struct sock *sk, struct timeval *tv);
408 347
409static inline suseconds_t timeval_usecs(const struct timeval *tv) 348static inline suseconds_t timeval_usecs(const struct timeval *tv)
@@ -444,4 +383,18 @@ static inline void timeval_sub_usecs(struct timeval *tv,
444 } 383 }
445} 384}
446 385
386#ifdef CONFIG_SYSCTL
387extern int dccp_sysctl_init(void);
388extern void dccp_sysctl_exit(void);
389#else
390static inline int dccp_sysctl_init(void)
391{
392 return 0;
393}
394
395static inline void dccp_sysctl_exit(void)
396{
397}
398#endif
399
447#endif /* _DCCP_H */ 400#endif /* _DCCP_H */
diff --git a/net/dccp/diag.c b/net/dccp/diag.c
index 3f78c00e3822..0f25dc395967 100644
--- a/net/dccp/diag.c
+++ b/net/dccp/diag.c
@@ -30,7 +30,7 @@ static void dccp_get_info(struct sock *sk, struct tcp_info *info)
30 info->tcpi_backoff = icsk->icsk_backoff; 30 info->tcpi_backoff = icsk->icsk_backoff;
31 info->tcpi_pmtu = icsk->icsk_pmtu_cookie; 31 info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
32 32
33 if (dp->dccps_options.dccpo_send_ack_vector) 33 if (dccp_msk(sk)->dccpms_send_ack_vector)
34 info->tcpi_options |= TCPI_OPT_SACK; 34 info->tcpi_options |= TCPI_OPT_SACK;
35 35
36 ccid_hc_rx_get_info(dp->dccps_hc_rx_ccid, sk, info); 36 ccid_hc_rx_get_info(dp->dccps_hc_rx_ccid, sk, info);
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
new file mode 100644
index 000000000000..e3dd30d36c8a
--- /dev/null
+++ b/net/dccp/feat.c
@@ -0,0 +1,586 @@
1/*
2 * net/dccp/feat.c
3 *
4 * An implementation of the DCCP protocol
5 * Andrea Bittau <a.bittau@cs.ucl.ac.uk>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/config.h>
14#include <linux/module.h>
15
16#include "dccp.h"
17#include "ccid.h"
18#include "feat.h"
19
20#define DCCP_FEAT_SP_NOAGREE (-123)
21
22int dccp_feat_change(struct dccp_minisock *dmsk, u8 type, u8 feature,
23 u8 *val, u8 len, gfp_t gfp)
24{
25 struct dccp_opt_pend *opt;
26
27 dccp_pr_debug("feat change type=%d feat=%d\n", type, feature);
28
29 /* XXX sanity check feat change request */
30
31 /* check if that feature is already being negotiated */
32 list_for_each_entry(opt, &dmsk->dccpms_pending, dccpop_node) {
33 /* ok we found a negotiation for this option already */
34 if (opt->dccpop_feat == feature && opt->dccpop_type == type) {
35 dccp_pr_debug("Replacing old\n");
36 /* replace */
37 BUG_ON(opt->dccpop_val == NULL);
38 kfree(opt->dccpop_val);
39 opt->dccpop_val = val;
40 opt->dccpop_len = len;
41 opt->dccpop_conf = 0;
42 return 0;
43 }
44 }
45
46 /* negotiation for a new feature */
47 opt = kmalloc(sizeof(*opt), gfp);
48 if (opt == NULL)
49 return -ENOMEM;
50
51 opt->dccpop_type = type;
52 opt->dccpop_feat = feature;
53 opt->dccpop_len = len;
54 opt->dccpop_val = val;
55 opt->dccpop_conf = 0;
56 opt->dccpop_sc = NULL;
57
58 BUG_ON(opt->dccpop_val == NULL);
59
60 list_add_tail(&opt->dccpop_node, &dmsk->dccpms_pending);
61 return 0;
62}
63
64EXPORT_SYMBOL_GPL(dccp_feat_change);
65
66static int dccp_feat_update_ccid(struct sock *sk, u8 type, u8 new_ccid_nr)
67{
68 struct dccp_sock *dp = dccp_sk(sk);
69 struct dccp_minisock *dmsk = dccp_msk(sk);
70 /* figure out if we are changing our CCID or the peer's */
71 const int rx = type == DCCPO_CHANGE_R;
72 const u8 ccid_nr = rx ? dmsk->dccpms_rx_ccid : dmsk->dccpms_tx_ccid;
73 struct ccid *new_ccid;
74
75 /* Check if nothing is being changed. */
76 if (ccid_nr == new_ccid_nr)
77 return 0;
78
79 new_ccid = ccid_new(new_ccid_nr, sk, rx, GFP_ATOMIC);
80 if (new_ccid == NULL)
81 return -ENOMEM;
82
83 if (rx) {
84 ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
85 dp->dccps_hc_rx_ccid = new_ccid;
86 dmsk->dccpms_rx_ccid = new_ccid_nr;
87 } else {
88 ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
89 dp->dccps_hc_tx_ccid = new_ccid;
90 dmsk->dccpms_tx_ccid = new_ccid_nr;
91 }
92
93 return 0;
94}
95
96/* XXX taking only u8 vals */
97static int dccp_feat_update(struct sock *sk, u8 type, u8 feat, u8 val)
98{
99 dccp_pr_debug("changing [%d] feat %d to %d\n", type, feat, val);
100
101 switch (feat) {
102 case DCCPF_CCID:
103 return dccp_feat_update_ccid(sk, type, val);
104 default:
105 dccp_pr_debug("IMPLEMENT changing [%d] feat %d to %d\n",
106 type, feat, val);
107 break;
108 }
109 return 0;
110}
111
112static int dccp_feat_reconcile(struct sock *sk, struct dccp_opt_pend *opt,
113 u8 *rpref, u8 rlen)
114{
115 struct dccp_sock *dp = dccp_sk(sk);
116 u8 *spref, slen, *res = NULL;
117 int i, j, rc, agree = 1;
118
119 BUG_ON(rpref == NULL);
120
121	/* check which side we are: the client swaps the two lists below */
122 if (dp->dccps_role == DCCP_ROLE_CLIENT) {
123 spref = rpref;
124 slen = rlen;
125 rpref = opt->dccpop_val;
126 rlen = opt->dccpop_len;
127 } else {
128 spref = opt->dccpop_val;
129 slen = opt->dccpop_len;
130 }
131 /*
132 * Now we have server preference list in spref and client preference in
133 * rpref
134 */
135 BUG_ON(spref == NULL);
136 BUG_ON(rpref == NULL);
137
138 /* FIXME sanity check vals */
139
140 /* Are values in any order? XXX Lame "algorithm" here */
141 /* XXX assume values are 1 byte */
142 for (i = 0; i < slen; i++) {
143 for (j = 0; j < rlen; j++) {
144 if (spref[i] == rpref[j]) {
145 res = &spref[i];
146 break;
147 }
148 }
149 if (res)
150 break;
151 }
152
153 /* we didn't agree on anything */
154 if (res == NULL) {
155 /* confirm previous value */
156 switch (opt->dccpop_feat) {
157 case DCCPF_CCID:
158			/* XXX verify this is the correct mapping */
159 if (opt->dccpop_type == DCCPO_CHANGE_L)
160 res = &dccp_msk(sk)->dccpms_tx_ccid;
161 else
162 res = &dccp_msk(sk)->dccpms_rx_ccid;
163 break;
164
165 default:
166 WARN_ON(1); /* XXX implement res */
167 return -EFAULT;
168 }
169
170 dccp_pr_debug("Don't agree... reconfirming %d\n", *res);
171 agree = 0; /* this is used for mandatory options... */
172 }
173
174 /* need to put result and our preference list */
175 /* XXX assume 1 byte vals */
176 rlen = 1 + opt->dccpop_len;
177 rpref = kmalloc(rlen, GFP_ATOMIC);
178 if (rpref == NULL)
179 return -ENOMEM;
180
181 *rpref = *res;
182 memcpy(&rpref[1], opt->dccpop_val, opt->dccpop_len);
183
184 /* put it in the "confirm queue" */
185 if (opt->dccpop_sc == NULL) {
186 opt->dccpop_sc = kmalloc(sizeof(*opt->dccpop_sc), GFP_ATOMIC);
187 if (opt->dccpop_sc == NULL) {
188 kfree(rpref);
189 return -ENOMEM;
190 }
191 } else {
192 /* recycle the confirm slot */
193 BUG_ON(opt->dccpop_sc->dccpoc_val == NULL);
194 kfree(opt->dccpop_sc->dccpoc_val);
195 dccp_pr_debug("recycling confirm slot\n");
196 }
197 memset(opt->dccpop_sc, 0, sizeof(*opt->dccpop_sc));
198
199 opt->dccpop_sc->dccpoc_val = rpref;
200 opt->dccpop_sc->dccpoc_len = rlen;
201
202 /* update the option on our side [we are about to send the confirm] */
203 rc = dccp_feat_update(sk, opt->dccpop_type, opt->dccpop_feat, *res);
204 if (rc) {
205 kfree(opt->dccpop_sc->dccpoc_val);
206 kfree(opt->dccpop_sc);
207		opt->dccpop_sc = NULL;
208 return rc;
209 }
210
211 dccp_pr_debug("Will confirm %d\n", *rpref);
212
213 /* say we want to change to X but we just got a confirm X, suppress our
214 * change
215 */
216 if (!opt->dccpop_conf) {
217 if (*opt->dccpop_val == *res)
218 opt->dccpop_conf = 1;
219 dccp_pr_debug("won't ask for change of same feature\n");
220 }
221
222 return agree ? 0 : DCCP_FEAT_SP_NOAGREE; /* used for mandatory opts */
223}
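
Stripped of the bookkeeping, the reconciliation above is a priority intersection: the server's preference list is scanned in order and the first value that also appears in the client's list wins; if nothing matches, the previous value is reconfirmed. A standalone model of just that selection (the names and the -1 "no agreement" convention are invented for the example):

#include <stdio.h>

/* Return the first server-preferred value also present in the client's
 * list, or -1 when the two lists share nothing. */
static int reconcile(const unsigned char *server, int slen,
		     const unsigned char *client, int clen)
{
	int i, j;

	for (i = 0; i < slen; i++)
		for (j = 0; j < clen; j++)
			if (server[i] == client[j])
				return server[i];
	return -1;
}

int main(void)
{
	const unsigned char server[] = { 2, 3 };	/* prefers CCID 2, then 3 */
	const unsigned char client[] = { 3, 2 };	/* prefers CCID 3, then 2 */

	printf("%d\n", reconcile(server, 2, client, 2));	/* prints 2 */
	return 0;
}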
224
225static int dccp_feat_sp(struct sock *sk, u8 type, u8 feature, u8 *val, u8 len)
226{
227 struct dccp_minisock *dmsk = dccp_msk(sk);
228 struct dccp_opt_pend *opt;
229 int rc = 1;
230 u8 t;
231
232 /*
233 * We received a CHANGE. We gotta match it against our own preference
234 * list. If we got a CHANGE_R it means it's a change for us, so we need
235 * to compare our CHANGE_L list.
236 */
237 if (type == DCCPO_CHANGE_L)
238 t = DCCPO_CHANGE_R;
239 else
240 t = DCCPO_CHANGE_L;
241
242 /* find our preference list for this feature */
243 list_for_each_entry(opt, &dmsk->dccpms_pending, dccpop_node) {
244 if (opt->dccpop_type != t || opt->dccpop_feat != feature)
245 continue;
246
247 /* find the winner from the two preference lists */
248 rc = dccp_feat_reconcile(sk, opt, val, len);
249 break;
250 }
251
252 /* We didn't deal with the change. This can happen if we have no
253 * preference list for the feature. In fact, it just shouldn't
254 * happen---if we understand a feature, we should have a preference list
255 * with at least the default value.
256 */
257 BUG_ON(rc == 1);
258
259 return rc;
260}
261
262static int dccp_feat_nn(struct sock *sk, u8 type, u8 feature, u8 *val, u8 len)
263{
264 struct dccp_opt_pend *opt;
265 struct dccp_minisock *dmsk = dccp_msk(sk);
266 u8 *copy;
267 int rc;
268
269 /* NN features must be change L */
270 if (type == DCCPO_CHANGE_R) {
271 dccp_pr_debug("received CHANGE_R %d for NN feat %d\n",
272 type, feature);
273 return -EFAULT;
274 }
275
276 /* XXX sanity check opt val */
277
278 /* copy option so we can confirm it */
279 opt = kzalloc(sizeof(*opt), GFP_ATOMIC);
280 if (opt == NULL)
281 return -ENOMEM;
282
283 copy = kmalloc(len, GFP_ATOMIC);
284 if (copy == NULL) {
285 kfree(opt);
286 return -ENOMEM;
287 }
288 memcpy(copy, val, len);
289
290 opt->dccpop_type = DCCPO_CONFIRM_R; /* NN can only confirm R */
291 opt->dccpop_feat = feature;
292 opt->dccpop_val = copy;
293 opt->dccpop_len = len;
294
295 /* change feature */
296 rc = dccp_feat_update(sk, type, feature, *val);
297 if (rc) {
298 kfree(opt->dccpop_val);
299 kfree(opt);
300 return rc;
301 }
302
303 dccp_pr_debug("Confirming NN feature %d (val=%d)\n", feature, *copy);
304 list_add_tail(&opt->dccpop_node, &dmsk->dccpms_conf);
305
306 return 0;
307}
308
309static void dccp_feat_empty_confirm(struct dccp_minisock *dmsk,
310 u8 type, u8 feature)
311{
312 /* XXX check if other confirms for that are queued and recycle slot */
313 struct dccp_opt_pend *opt = kzalloc(sizeof(*opt), GFP_ATOMIC);
314
315 if (opt == NULL) {
316		/* XXX what do we do? Ignoring should be safe, since this is
317		 * only a change request which the peer will retransmit
318 */
319 return;
320 }
321
322 opt->dccpop_type = type == DCCPO_CHANGE_L ? DCCPO_CONFIRM_R :
323 DCCPO_CONFIRM_L;
324 opt->dccpop_feat = feature;
325	opt->dccpop_val = NULL;
326 opt->dccpop_len = 0;
327
328 /* change feature */
329 dccp_pr_debug("Empty confirm feature %d type %d\n", feature, type);
330 list_add_tail(&opt->dccpop_node, &dmsk->dccpms_conf);
331}
332
333static void dccp_feat_flush_confirm(struct sock *sk)
334{
335 struct dccp_minisock *dmsk = dccp_msk(sk);
336 /* Check if there is anything to confirm in the first place */
337 int yes = !list_empty(&dmsk->dccpms_conf);
338
339 if (!yes) {
340 struct dccp_opt_pend *opt;
341
342 list_for_each_entry(opt, &dmsk->dccpms_pending, dccpop_node) {
343 if (opt->dccpop_conf) {
344 yes = 1;
345 break;
346 }
347 }
348 }
349
350 if (!yes)
351 return;
352
353 /* OK there is something to confirm... */
354 /* XXX check if packet is in flight? Send delayed ack?? */
355 if (sk->sk_state == DCCP_OPEN)
356 dccp_send_ack(sk);
357}
358
359int dccp_feat_change_recv(struct sock *sk, u8 type, u8 feature, u8 *val, u8 len)
360{
361 int rc;
362
363 dccp_pr_debug("got feat change type=%d feat=%d\n", type, feature);
364
365 /* figure out if it's SP or NN feature */
366 switch (feature) {
367 /* deal with SP features */
368 case DCCPF_CCID:
369 rc = dccp_feat_sp(sk, type, feature, val, len);
370 break;
371
372 /* deal with NN features */
373 case DCCPF_ACK_RATIO:
374 rc = dccp_feat_nn(sk, type, feature, val, len);
375 break;
376
377 /* XXX implement other features */
378 default:
379 rc = -EFAULT;
380 break;
381 }
382
383 /* check if there were problems changing features */
384 if (rc) {
385 /* If we don't agree on SP, we sent a confirm for old value.
386 * However we propagate rc to caller in case option was
387 * mandatory
388 */
389 if (rc != DCCP_FEAT_SP_NOAGREE)
390 dccp_feat_empty_confirm(dccp_msk(sk), type, feature);
391 }
392
393 /* generate the confirm [if required] */
394 dccp_feat_flush_confirm(sk);
395
396 return rc;
397}
398
399EXPORT_SYMBOL_GPL(dccp_feat_change_recv);
400
401int dccp_feat_confirm_recv(struct sock *sk, u8 type, u8 feature,
402 u8 *val, u8 len)
403{
404 u8 t;
405 struct dccp_opt_pend *opt;
406 struct dccp_minisock *dmsk = dccp_msk(sk);
407 int rc = 1;
408 int all_confirmed = 1;
409
410 dccp_pr_debug("got feat confirm type=%d feat=%d\n", type, feature);
411
412 /* XXX sanity check type & feat */
413
414 /* locate our change request */
415 t = type == DCCPO_CONFIRM_L ? DCCPO_CHANGE_R : DCCPO_CHANGE_L;
416
417 list_for_each_entry(opt, &dmsk->dccpms_pending, dccpop_node) {
418 if (!opt->dccpop_conf && opt->dccpop_type == t &&
419 opt->dccpop_feat == feature) {
420 /* we found it */
421 /* XXX do sanity check */
422
423 opt->dccpop_conf = 1;
424
425 /* We got a confirmation---change the option */
426 dccp_feat_update(sk, opt->dccpop_type,
427 opt->dccpop_feat, *val);
428
429 dccp_pr_debug("feat %d type %d confirmed %d\n",
430 feature, type, *val);
431 rc = 0;
432 break;
433 }
434
435 if (!opt->dccpop_conf)
436 all_confirmed = 0;
437 }
438
439 /* fix re-transmit timer */
440 /* XXX gotta make sure that no option negotiation occurs during
441 * connection shutdown. Consider that the CLOSEREQ is sent and timer is
442	 * on. If all options are confirmed, it might kill the timer, which should
443 * remain alive until close is received.
444 */
445 if (all_confirmed) {
446 dccp_pr_debug("clear feat negotiation timer %p\n", sk);
447 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
448 }
449
450 if (rc)
451 dccp_pr_debug("feat %d type %d never requested\n",
452 feature, type);
453 return 0;
454}
455
456EXPORT_SYMBOL_GPL(dccp_feat_confirm_recv);
457
458void dccp_feat_clean(struct dccp_minisock *dmsk)
459{
460 struct dccp_opt_pend *opt, *next;
461
462 list_for_each_entry_safe(opt, next, &dmsk->dccpms_pending,
463 dccpop_node) {
464 BUG_ON(opt->dccpop_val == NULL);
465 kfree(opt->dccpop_val);
466
467 if (opt->dccpop_sc != NULL) {
468 BUG_ON(opt->dccpop_sc->dccpoc_val == NULL);
469 kfree(opt->dccpop_sc->dccpoc_val);
470 kfree(opt->dccpop_sc);
471 }
472
473 kfree(opt);
474 }
475 INIT_LIST_HEAD(&dmsk->dccpms_pending);
476
477 list_for_each_entry_safe(opt, next, &dmsk->dccpms_conf, dccpop_node) {
478 BUG_ON(opt == NULL);
479 if (opt->dccpop_val != NULL)
480 kfree(opt->dccpop_val);
481 kfree(opt);
482 }
483 INIT_LIST_HEAD(&dmsk->dccpms_conf);
484}
485
486EXPORT_SYMBOL_GPL(dccp_feat_clean);
487
488/* This is to be called only when a listening sock creates its child. The
489 * function assumes that the confirm is not duplicated, but rather it is
490 * "passed on" to the new socket.
491 */
492int dccp_feat_clone(struct sock *oldsk, struct sock *newsk)
493{
494 struct dccp_minisock *olddmsk = dccp_msk(oldsk);
495 struct dccp_minisock *newdmsk = dccp_msk(newsk);
496 struct dccp_opt_pend *opt;
497 int rc = 0;
498
499 INIT_LIST_HEAD(&newdmsk->dccpms_pending);
500 INIT_LIST_HEAD(&newdmsk->dccpms_conf);
501
502 list_for_each_entry(opt, &olddmsk->dccpms_pending, dccpop_node) {
503 struct dccp_opt_pend *newopt;
504 /* copy the value of the option */
505 u8 *val = kmalloc(opt->dccpop_len, GFP_ATOMIC);
506
507 if (val == NULL)
508 goto out_clean;
509 memcpy(val, opt->dccpop_val, opt->dccpop_len);
510
511 newopt = kmalloc(sizeof(*newopt), GFP_ATOMIC);
512 if (newopt == NULL) {
513 kfree(val);
514 goto out_clean;
515 }
516
517 /* insert the option */
518 memcpy(newopt, opt, sizeof(*newopt));
519 newopt->dccpop_val = val;
520 list_add_tail(&newopt->dccpop_node, &newdmsk->dccpms_pending);
521
522 /* XXX what happens with backlogs and multiple connections at
523 * once...
524 */
525 /* the master socket no longer needs to worry about confirms */
526		opt->dccpop_sc = NULL; /* not a leak: the new socket now owns it */
527
528 /* reset state for a new socket */
529 opt->dccpop_conf = 0;
530 }
531
532 /* XXX not doing anything about the conf queue */
533
534out:
535 return rc;
536
537out_clean:
538 dccp_feat_clean(newdmsk);
539 rc = -ENOMEM;
540 goto out;
541}
542
543EXPORT_SYMBOL_GPL(dccp_feat_clone);
544
545static int __dccp_feat_init(struct dccp_minisock *dmsk, u8 type, u8 feat,
546 u8 *val, u8 len)
547{
548 int rc = -ENOMEM;
549 u8 *copy = kmalloc(len, GFP_KERNEL);
550
551 if (copy != NULL) {
552 memcpy(copy, val, len);
553 rc = dccp_feat_change(dmsk, type, feat, copy, len, GFP_KERNEL);
554 if (rc)
555 kfree(copy);
556 }
557 return rc;
558}
559
560int dccp_feat_init(struct dccp_minisock *dmsk)
561{
562 int rc;
563
564 INIT_LIST_HEAD(&dmsk->dccpms_pending);
565 INIT_LIST_HEAD(&dmsk->dccpms_conf);
566
567 /* CCID L */
568 rc = __dccp_feat_init(dmsk, DCCPO_CHANGE_L, DCCPF_CCID,
569 &dmsk->dccpms_tx_ccid, 1);
570 if (rc)
571 goto out;
572
573 /* CCID R */
574 rc = __dccp_feat_init(dmsk, DCCPO_CHANGE_R, DCCPF_CCID,
575 &dmsk->dccpms_rx_ccid, 1);
576 if (rc)
577 goto out;
578
579 /* Ack ratio */
580 rc = __dccp_feat_init(dmsk, DCCPO_CHANGE_L, DCCPF_ACK_RATIO,
581 &dmsk->dccpms_ack_ratio, 1);
582out:
583 return rc;
584}
585
586EXPORT_SYMBOL_GPL(dccp_feat_init);
diff --git a/net/dccp/feat.h b/net/dccp/feat.h
new file mode 100644
index 000000000000..6048373c7186
--- /dev/null
+++ b/net/dccp/feat.h
@@ -0,0 +1,29 @@
1#ifndef _DCCP_FEAT_H
2#define _DCCP_FEAT_H
3/*
4 * net/dccp/feat.h
5 *
6 * An implementation of the DCCP protocol
7 * Copyright (c) 2005 Andrea Bittau <a.bittau@cs.ucl.ac.uk>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/types.h>
15
16struct sock;
17struct dccp_minisock;
18
19extern int dccp_feat_change(struct dccp_minisock *dmsk, u8 type, u8 feature,
20 u8 *val, u8 len, gfp_t gfp);
21extern int dccp_feat_change_recv(struct sock *sk, u8 type, u8 feature,
22 u8 *val, u8 len);
23extern int dccp_feat_confirm_recv(struct sock *sk, u8 type, u8 feature,
24 u8 *val, u8 len);
25extern void dccp_feat_clean(struct dccp_minisock *dmsk);
26extern int dccp_feat_clone(struct sock *oldsk, struct sock *newsk);
27extern int dccp_feat_init(struct dccp_minisock *dmsk);
28
29#endif /* _DCCP_FEAT_H */
diff --git a/net/dccp/input.c b/net/dccp/input.c
index b6cba72b44e8..bfc53665516b 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -32,7 +32,7 @@ static void dccp_fin(struct sock *sk, struct sk_buff *skb)
32 32
33static void dccp_rcv_close(struct sock *sk, struct sk_buff *skb) 33static void dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
34{ 34{
35 dccp_v4_send_reset(sk, DCCP_RESET_CODE_CLOSED); 35 dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
36 dccp_fin(sk, skb); 36 dccp_fin(sk, skb);
37 dccp_set_state(sk, DCCP_CLOSED); 37 dccp_set_state(sk, DCCP_CLOSED);
38 sk_wake_async(sk, 1, POLL_HUP); 38 sk_wake_async(sk, 1, POLL_HUP);
@@ -56,11 +56,11 @@ static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
56 dccp_send_close(sk, 0); 56 dccp_send_close(sk, 0);
57} 57}
58 58
59static inline void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb) 59static void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb)
60{ 60{
61 struct dccp_sock *dp = dccp_sk(sk); 61 struct dccp_sock *dp = dccp_sk(sk);
62 62
63 if (dp->dccps_options.dccpo_send_ack_vector) 63 if (dccp_msk(sk)->dccpms_send_ack_vector)
64 dccp_ackvec_check_rcv_ackno(dp->dccps_hc_rx_ackvec, sk, 64 dccp_ackvec_check_rcv_ackno(dp->dccps_hc_rx_ackvec, sk,
65 DCCP_SKB_CB(skb)->dccpd_ack_seq); 65 DCCP_SKB_CB(skb)->dccpd_ack_seq);
66} 66}
@@ -151,9 +151,8 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
151 return 0; 151 return 0;
152} 152}
153 153
154static inline int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb, 154static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
155 const struct dccp_hdr *dh, 155 const struct dccp_hdr *dh, const unsigned len)
156 const unsigned len)
157{ 156{
158 struct dccp_sock *dp = dccp_sk(sk); 157 struct dccp_sock *dp = dccp_sk(sk);
159 158
@@ -247,7 +246,7 @@ int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
247 if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) 246 if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
248 dccp_event_ack_recv(sk, skb); 247 dccp_event_ack_recv(sk, skb);
249 248
250 if (dp->dccps_options.dccpo_send_ack_vector && 249 if (dccp_msk(sk)->dccpms_send_ack_vector &&
251 dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk, 250 dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
252 DCCP_SKB_CB(skb)->dccpd_seq, 251 DCCP_SKB_CB(skb)->dccpd_seq,
253 DCCP_ACKVEC_STATE_RECEIVED)) 252 DCCP_ACKVEC_STATE_RECEIVED))
@@ -300,7 +299,10 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
300 goto out_invalid_packet; 299 goto out_invalid_packet;
301 } 300 }
302 301
303 if (dp->dccps_options.dccpo_send_ack_vector && 302 if (dccp_parse_options(sk, skb))
303 goto out_invalid_packet;
304
305 if (dccp_msk(sk)->dccpms_send_ack_vector &&
304 dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk, 306 dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
305 DCCP_SKB_CB(skb)->dccpd_seq, 307 DCCP_SKB_CB(skb)->dccpd_seq,
306 DCCP_ACKVEC_STATE_RECEIVED)) 308 DCCP_ACKVEC_STATE_RECEIVED))
@@ -321,14 +323,6 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
321 dccp_set_seqno(&dp->dccps_swl, 323 dccp_set_seqno(&dp->dccps_swl,
322 max48(dp->dccps_swl, dp->dccps_isr)); 324 max48(dp->dccps_swl, dp->dccps_isr));
323 325
324 if (ccid_hc_rx_init(dp->dccps_hc_rx_ccid, sk) != 0 ||
325 ccid_hc_tx_init(dp->dccps_hc_tx_ccid, sk) != 0) {
326 ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
327 ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
328 /* FIXME: send appropriate RESET code */
329 goto out_invalid_packet;
330 }
331
332 dccp_sync_mss(sk, icsk->icsk_pmtu_cookie); 326 dccp_sync_mss(sk, icsk->icsk_pmtu_cookie);
333 327
334 /* 328 /*
@@ -492,7 +486,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
492 if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) 486 if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
493 dccp_event_ack_recv(sk, skb); 487 dccp_event_ack_recv(sk, skb);
494 488
495 if (dp->dccps_options.dccpo_send_ack_vector && 489 if (dccp_msk(sk)->dccpms_send_ack_vector &&
496 dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk, 490 dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
497 DCCP_SKB_CB(skb)->dccpd_seq, 491 DCCP_SKB_CB(skb)->dccpd_seq,
498 DCCP_ACKVEC_STATE_RECEIVED)) 492 DCCP_ACKVEC_STATE_RECEIVED))
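
The input-path hunks above all follow one pattern: when dccpms_send_ack_vector is enabled, every accepted packet's sequence number is recorded in the receive Ack Vector via dccp_ackvec_add(), and each incoming acknowledgement lets dccp_ackvec_check_rcv_ackno() retire state the peer has already seen. A deliberately simplified model of that bookkeeping; the real Ack Vector is run-length encoded over 48-bit sequence numbers, so the fixed array and the names below are illustrative only:

#include <stdio.h>

#define WIN 16				/* toy window */

enum { AV_NOT_RECEIVED = 0, AV_RECEIVED = 1 };

static unsigned char av[WIN];		/* per-sequence-number state, mod WIN */

/* Rough analogue of dccp_ackvec_add(): remember that seqno arrived. */
static void av_add(unsigned long seq)
{
	av[seq % WIN] = AV_RECEIVED;
}

/* Rough analogue of dccp_ackvec_check_rcv_ackno(): the peer acknowledged
 * our reports up to ackno, so older entries can be forgotten. */
static void av_ack_received(unsigned long ackno)
{
	for (unsigned long s = 0; s <= ackno; s++)
		av[s % WIN] = AV_NOT_RECEIVED;
}

int main(void)
{
	av_add(1); av_add(2); av_add(4);	/* packet 3 was lost */
	av_ack_received(2);			/* peer has seen our report up to 2 */
	for (int i = 0; i < 6; i++)
		printf("seq %d: %s\n", i, av[i] ? "received" : "missing");
	return 0;
}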
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index dc0487b5bace..29047995c695 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -18,8 +18,10 @@
18#include <linux/random.h> 18#include <linux/random.h>
19 19
20#include <net/icmp.h> 20#include <net/icmp.h>
21#include <net/inet_common.h>
21#include <net/inet_hashtables.h> 22#include <net/inet_hashtables.h>
22#include <net/inet_sock.h> 23#include <net/inet_sock.h>
24#include <net/protocol.h>
23#include <net/sock.h> 25#include <net/sock.h>
24#include <net/timewait_sock.h> 26#include <net/timewait_sock.h>
25#include <net/tcp_states.h> 27#include <net/tcp_states.h>
@@ -28,14 +30,14 @@
28#include "ackvec.h" 30#include "ackvec.h"
29#include "ccid.h" 31#include "ccid.h"
30#include "dccp.h" 32#include "dccp.h"
33#include "feat.h"
31 34
32struct inet_hashinfo __cacheline_aligned dccp_hashinfo = { 35/*
33 .lhash_lock = RW_LOCK_UNLOCKED, 36 * This is the global socket data structure used for responding to
34 .lhash_users = ATOMIC_INIT(0), 37 * the Out-of-the-blue (OOTB) packets. A control sock will be created
35 .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(dccp_hashinfo.lhash_wait), 38 * for this socket at the initialization time.
36}; 39 */
37 40static struct socket *dccp_v4_ctl_socket;
38EXPORT_SYMBOL_GPL(dccp_hashinfo);
39 41
40static int dccp_v4_get_port(struct sock *sk, const unsigned short snum) 42static int dccp_v4_get_port(struct sock *sk, const unsigned short snum)
41{ 43{
@@ -43,18 +45,6 @@ static int dccp_v4_get_port(struct sock *sk, const unsigned short snum)
43 inet_csk_bind_conflict); 45 inet_csk_bind_conflict);
44} 46}
45 47
46static void dccp_v4_hash(struct sock *sk)
47{
48 inet_hash(&dccp_hashinfo, sk);
49}
50
51void dccp_unhash(struct sock *sk)
52{
53 inet_unhash(&dccp_hashinfo, sk);
54}
55
56EXPORT_SYMBOL_GPL(dccp_unhash);
57
58int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 48int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
59{ 49{
60 struct inet_sock *inet = inet_sk(sk); 50 struct inet_sock *inet = inet_sk(sk);
@@ -207,11 +197,12 @@ static inline void dccp_do_pmtu_discovery(struct sock *sk,
207 } /* else let the usual retransmit timer handle it */ 197 } /* else let the usual retransmit timer handle it */
208} 198}
209 199
210static void dccp_v4_ctl_send_ack(struct sk_buff *rxskb) 200static void dccp_v4_reqsk_send_ack(struct sk_buff *rxskb,
201 struct request_sock *req)
211{ 202{
212 int err; 203 int err;
213 struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh; 204 struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
214 const int dccp_hdr_ack_len = sizeof(struct dccp_hdr) + 205 const u32 dccp_hdr_ack_len = sizeof(struct dccp_hdr) +
215 sizeof(struct dccp_hdr_ext) + 206 sizeof(struct dccp_hdr_ext) +
216 sizeof(struct dccp_hdr_ack_bits); 207 sizeof(struct dccp_hdr_ack_bits);
217 struct sk_buff *skb; 208 struct sk_buff *skb;
@@ -219,12 +210,12 @@ static void dccp_v4_ctl_send_ack(struct sk_buff *rxskb)
219 if (((struct rtable *)rxskb->dst)->rt_type != RTN_LOCAL) 210 if (((struct rtable *)rxskb->dst)->rt_type != RTN_LOCAL)
220 return; 211 return;
221 212
222 skb = alloc_skb(MAX_DCCP_HEADER + 15, GFP_ATOMIC); 213 skb = alloc_skb(dccp_v4_ctl_socket->sk->sk_prot->max_header, GFP_ATOMIC);
223 if (skb == NULL) 214 if (skb == NULL)
224 return; 215 return;
225 216
226 /* Reserve space for headers. */ 217 /* Reserve space for headers. */
227 skb_reserve(skb, MAX_DCCP_HEADER); 218 skb_reserve(skb, dccp_v4_ctl_socket->sk->sk_prot->max_header);
228 219
229 skb->dst = dst_clone(rxskb->dst); 220 skb->dst = dst_clone(rxskb->dst);
230 221
@@ -243,11 +234,11 @@ static void dccp_v4_ctl_send_ack(struct sk_buff *rxskb)
243 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), 234 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
244 DCCP_SKB_CB(rxskb)->dccpd_seq); 235 DCCP_SKB_CB(rxskb)->dccpd_seq);
245 236
246 bh_lock_sock(dccp_ctl_socket->sk); 237 bh_lock_sock(dccp_v4_ctl_socket->sk);
247 err = ip_build_and_send_pkt(skb, dccp_ctl_socket->sk, 238 err = ip_build_and_send_pkt(skb, dccp_v4_ctl_socket->sk,
248 rxskb->nh.iph->daddr, 239 rxskb->nh.iph->daddr,
249 rxskb->nh.iph->saddr, NULL); 240 rxskb->nh.iph->saddr, NULL);
250 bh_unlock_sock(dccp_ctl_socket->sk); 241 bh_unlock_sock(dccp_v4_ctl_socket->sk);
251 242
252 if (err == NET_XMIT_CN || err == 0) { 243 if (err == NET_XMIT_CN || err == 0) {
253 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); 244 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
@@ -255,12 +246,6 @@ static void dccp_v4_ctl_send_ack(struct sk_buff *rxskb)
255 } 246 }
256} 247}
257 248
258static void dccp_v4_reqsk_send_ack(struct sk_buff *skb,
259 struct request_sock *req)
260{
261 dccp_v4_ctl_send_ack(skb);
262}
263
264static int dccp_v4_send_response(struct sock *sk, struct request_sock *req, 249static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
265 struct dst_entry *dst) 250 struct dst_entry *dst)
266{ 251{
@@ -275,7 +260,10 @@ static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
275 skb = dccp_make_response(sk, dst, req); 260 skb = dccp_make_response(sk, dst, req);
276 if (skb != NULL) { 261 if (skb != NULL) {
277 const struct inet_request_sock *ireq = inet_rsk(req); 262 const struct inet_request_sock *ireq = inet_rsk(req);
263 struct dccp_hdr *dh = dccp_hdr(skb);
278 264
265 dh->dccph_checksum = dccp_v4_checksum(skb, ireq->loc_addr,
266 ireq->rmt_addr);
279 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 267 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
280 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, 268 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
281 ireq->rmt_addr, 269 ireq->rmt_addr,
@@ -301,7 +289,7 @@ out:
301 * check at all. A more general error queue to queue errors for later handling 289 * check at all. A more general error queue to queue errors for later handling
302 * is probably better. 290 * is probably better.
303 */ 291 */
304void dccp_v4_err(struct sk_buff *skb, u32 info) 292static void dccp_v4_err(struct sk_buff *skb, u32 info)
305{ 293{
306 const struct iphdr *iph = (struct iphdr *)skb->data; 294 const struct iphdr *iph = (struct iphdr *)skb->data;
307 const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + 295 const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data +
@@ -456,32 +444,6 @@ void dccp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
456 444
457EXPORT_SYMBOL_GPL(dccp_v4_send_check); 445EXPORT_SYMBOL_GPL(dccp_v4_send_check);
458 446
459int dccp_v4_send_reset(struct sock *sk, enum dccp_reset_codes code)
460{
461 struct sk_buff *skb;
462 /*
463 * FIXME: what if rebuild_header fails?
464 * Should we be doing a rebuild_header here?
465 */
466 int err = inet_sk_rebuild_header(sk);
467
468 if (err != 0)
469 return err;
470
471 skb = dccp_make_reset(sk, sk->sk_dst_cache, code);
472 if (skb != NULL) {
473 const struct inet_sock *inet = inet_sk(sk);
474
475 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
476 err = ip_build_and_send_pkt(skb, sk,
477 inet->saddr, inet->daddr, NULL);
478 if (err == NET_XMIT_CN)
479 err = 0;
480 }
481
482 return err;
483}
484
485static inline u64 dccp_v4_init_sequence(const struct sock *sk, 447static inline u64 dccp_v4_init_sequence(const struct sock *sk,
486 const struct sk_buff *skb) 448 const struct sk_buff *skb)
487{ 449{
@@ -497,9 +459,9 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
497 struct dccp_sock dp; 459 struct dccp_sock dp;
498 struct request_sock *req; 460 struct request_sock *req;
499 struct dccp_request_sock *dreq; 461 struct dccp_request_sock *dreq;
500 const __u32 saddr = skb->nh.iph->saddr; 462 const __be32 saddr = skb->nh.iph->saddr;
501 const __u32 daddr = skb->nh.iph->daddr; 463 const __be32 daddr = skb->nh.iph->daddr;
502 const __u32 service = dccp_hdr_request(skb)->dccph_req_service; 464 const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
503 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); 465 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
504 __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY; 466 __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;
505 467
@@ -535,7 +497,8 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
535 if (req == NULL) 497 if (req == NULL)
536 goto drop; 498 goto drop;
537 499
538 /* FIXME: process options */ 500 if (dccp_parse_options(sk, skb))
501 goto drop;
539 502
540 dccp_openreq_init(req, &dp, skb); 503 dccp_openreq_init(req, &dp, skb);
541 504
@@ -660,8 +623,8 @@ static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
660 return sk; 623 return sk;
661} 624}
662 625
663int dccp_v4_checksum(const struct sk_buff *skb, const u32 saddr, 626int dccp_v4_checksum(const struct sk_buff *skb, const __be32 saddr,
664 const u32 daddr) 627 const __be32 daddr)
665{ 628{
666 const struct dccp_hdr* dh = dccp_hdr(skb); 629 const struct dccp_hdr* dh = dccp_hdr(skb);
667 int checksum_len; 630 int checksum_len;
@@ -680,8 +643,10 @@ int dccp_v4_checksum(const struct sk_buff *skb, const u32 saddr,
680 IPPROTO_DCCP, tmp); 643 IPPROTO_DCCP, tmp);
681} 644}
682 645
646EXPORT_SYMBOL_GPL(dccp_v4_checksum);
647
683static int dccp_v4_verify_checksum(struct sk_buff *skb, 648static int dccp_v4_verify_checksum(struct sk_buff *skb,
684 const u32 saddr, const u32 daddr) 649 const __be32 saddr, const __be32 daddr)
685{ 650{
686 struct dccp_hdr *dh = dccp_hdr(skb); 651 struct dccp_hdr *dh = dccp_hdr(skb);
687 int checksum_len; 652 int checksum_len;
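
dccp_v4_checksum() feeds the DCCP header, options and the covered part of the payload, together with the usual IPv4 pseudo-header, into csum_partial()/csum_tcpudp_magic(). A self-contained sketch of the same computation for the full-coverage case (Checksum Coverage == 0, so the whole packet is summed and the pseudo-header length equals the packet length); the helper names are illustrative, not the kernel's:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Generic Internet (RFC 1071) one's-complement sum over big-endian 16-bit words. */
static uint32_t csum_add(uint32_t sum, const uint8_t *data, size_t len)
{
	while (len > 1) {
		sum += (uint32_t)data[0] << 8 | data[1];
		data += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte, padded with zero */
		sum += (uint32_t)data[0] << 8;
	return sum;
}

static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* Checksum over the IPv4 pseudo-header plus 'len' bytes of DCCP packet
 * (header + options + payload), with the checksum field zeroed beforehand. */
static uint16_t dccp_csum_sketch(uint32_t saddr, uint32_t daddr,
				 const uint8_t *dccp, size_t len)
{
	uint8_t ph[12];
	uint32_t sum;

	ph[0] = saddr >> 24; ph[1] = saddr >> 16; ph[2] = saddr >> 8; ph[3] = saddr;
	ph[4] = daddr >> 24; ph[5] = daddr >> 16; ph[6] = daddr >> 8; ph[7] = daddr;
	ph[8] = 0;  ph[9] = 33;		/* zero byte, then IPPROTO_DCCP */
	ph[10] = len >> 8; ph[11] = len;

	sum = csum_add(0, ph, sizeof(ph));
	sum = csum_add(sum, dccp, len);
	return csum_fold(sum);
}

int main(void)
{
	uint8_t pkt[16] = { 0 };	/* pretend DCCP header, checksum field zeroed */

	/* 192.0.2.1 -> 192.0.2.2, placeholder addresses */
	printf("%04x\n", dccp_csum_sketch(0xc0000201u, 0xc0000202u, pkt, sizeof(pkt)));
	return 0;
}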
@@ -741,16 +706,17 @@ static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb)
741 if (((struct rtable *)rxskb->dst)->rt_type != RTN_LOCAL) 706 if (((struct rtable *)rxskb->dst)->rt_type != RTN_LOCAL)
742 return; 707 return;
743 708
744 dst = dccp_v4_route_skb(dccp_ctl_socket->sk, rxskb); 709 dst = dccp_v4_route_skb(dccp_v4_ctl_socket->sk, rxskb);
745 if (dst == NULL) 710 if (dst == NULL)
746 return; 711 return;
747 712
748 skb = alloc_skb(MAX_DCCP_HEADER + 15, GFP_ATOMIC); 713 skb = alloc_skb(dccp_v4_ctl_socket->sk->sk_prot->max_header,
714 GFP_ATOMIC);
749 if (skb == NULL) 715 if (skb == NULL)
750 goto out; 716 goto out;
751 717
752 /* Reserve space for headers. */ 718 /* Reserve space for headers. */
753 skb_reserve(skb, MAX_DCCP_HEADER); 719 skb_reserve(skb, dccp_v4_ctl_socket->sk->sk_prot->max_header);
754 skb->dst = dst_clone(dst); 720 skb->dst = dst_clone(dst);
755 721
756 skb->h.raw = skb_push(skb, dccp_hdr_reset_len); 722 skb->h.raw = skb_push(skb, dccp_hdr_reset_len);
@@ -778,11 +744,11 @@ static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb)
778 dh->dccph_checksum = dccp_v4_checksum(skb, rxskb->nh.iph->saddr, 744 dh->dccph_checksum = dccp_v4_checksum(skb, rxskb->nh.iph->saddr,
779 rxskb->nh.iph->daddr); 745 rxskb->nh.iph->daddr);
780 746
781 bh_lock_sock(dccp_ctl_socket->sk); 747 bh_lock_sock(dccp_v4_ctl_socket->sk);
782 err = ip_build_and_send_pkt(skb, dccp_ctl_socket->sk, 748 err = ip_build_and_send_pkt(skb, dccp_v4_ctl_socket->sk,
783 rxskb->nh.iph->daddr, 749 rxskb->nh.iph->daddr,
784 rxskb->nh.iph->saddr, NULL); 750 rxskb->nh.iph->saddr, NULL);
785 bh_unlock_sock(dccp_ctl_socket->sk); 751 bh_unlock_sock(dccp_v4_ctl_socket->sk);
786 752
787 if (err == NET_XMIT_CN || err == 0) { 753 if (err == NET_XMIT_CN || err == 0) {
788 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); 754 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
@@ -912,7 +878,7 @@ int dccp_invalid_packet(struct sk_buff *skb)
912EXPORT_SYMBOL_GPL(dccp_invalid_packet); 878EXPORT_SYMBOL_GPL(dccp_invalid_packet);
913 879
914/* this is called when real data arrives */ 880/* this is called when real data arrives */
915int dccp_v4_rcv(struct sk_buff *skb) 881static int dccp_v4_rcv(struct sk_buff *skb)
916{ 882{
917 const struct dccp_hdr *dh; 883 const struct dccp_hdr *dh;
918 struct sock *sk; 884 struct sock *sk;
@@ -1019,111 +985,37 @@ do_time_wait:
1019 goto no_dccp_socket; 985 goto no_dccp_socket;
1020} 986}
1021 987
1022struct inet_connection_sock_af_ops dccp_ipv4_af_ops = { 988static struct inet_connection_sock_af_ops dccp_ipv4_af_ops = {
1023 .queue_xmit = ip_queue_xmit, 989 .queue_xmit = ip_queue_xmit,
1024 .send_check = dccp_v4_send_check, 990 .send_check = dccp_v4_send_check,
1025 .rebuild_header = inet_sk_rebuild_header, 991 .rebuild_header = inet_sk_rebuild_header,
1026 .conn_request = dccp_v4_conn_request, 992 .conn_request = dccp_v4_conn_request,
1027 .syn_recv_sock = dccp_v4_request_recv_sock, 993 .syn_recv_sock = dccp_v4_request_recv_sock,
1028 .net_header_len = sizeof(struct iphdr), 994 .net_header_len = sizeof(struct iphdr),
1029 .setsockopt = ip_setsockopt, 995 .setsockopt = ip_setsockopt,
1030 .getsockopt = ip_getsockopt, 996 .getsockopt = ip_getsockopt,
1031 .addr2sockaddr = inet_csk_addr2sockaddr, 997 .addr2sockaddr = inet_csk_addr2sockaddr,
1032 .sockaddr_len = sizeof(struct sockaddr_in), 998 .sockaddr_len = sizeof(struct sockaddr_in),
999#ifdef CONFIG_COMPAT
1000 .compat_setsockopt = compat_ip_setsockopt,
1001 .compat_getsockopt = compat_ip_getsockopt,
1002#endif
1033}; 1003};
1034 1004
1035int dccp_v4_init_sock(struct sock *sk) 1005static int dccp_v4_init_sock(struct sock *sk)
1036{
1037 struct dccp_sock *dp = dccp_sk(sk);
1038 struct inet_connection_sock *icsk = inet_csk(sk);
1039 static int dccp_ctl_socket_init = 1;
1040
1041 dccp_options_init(&dp->dccps_options);
1042 do_gettimeofday(&dp->dccps_epoch);
1043
1044 if (dp->dccps_options.dccpo_send_ack_vector) {
1045 dp->dccps_hc_rx_ackvec = dccp_ackvec_alloc(DCCP_MAX_ACKVEC_LEN,
1046 GFP_KERNEL);
1047 if (dp->dccps_hc_rx_ackvec == NULL)
1048 return -ENOMEM;
1049 }
1050
1051 /*
1052 * FIXME: We're hardcoding the CCID, and doing this at this point makes
1053 * the listening (master) sock get CCID control blocks, which is not
1054 * necessary, but for now, to not mess with the test userspace apps,
1055 * lets leave it here, later the real solution is to do this in a
1056 * setsockopt(CCIDs-I-want/accept). -acme
1057 */
1058 if (likely(!dccp_ctl_socket_init)) {
1059 dp->dccps_hc_rx_ccid = ccid_init(dp->dccps_options.dccpo_rx_ccid,
1060 sk);
1061 dp->dccps_hc_tx_ccid = ccid_init(dp->dccps_options.dccpo_tx_ccid,
1062 sk);
1063 if (dp->dccps_hc_rx_ccid == NULL ||
1064 dp->dccps_hc_tx_ccid == NULL) {
1065 ccid_exit(dp->dccps_hc_rx_ccid, sk);
1066 ccid_exit(dp->dccps_hc_tx_ccid, sk);
1067 if (dp->dccps_options.dccpo_send_ack_vector) {
1068 dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
1069 dp->dccps_hc_rx_ackvec = NULL;
1070 }
1071 dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
1072 return -ENOMEM;
1073 }
1074 } else
1075 dccp_ctl_socket_init = 0;
1076
1077 dccp_init_xmit_timers(sk);
1078 icsk->icsk_rto = DCCP_TIMEOUT_INIT;
1079 sk->sk_state = DCCP_CLOSED;
1080 sk->sk_write_space = dccp_write_space;
1081 icsk->icsk_af_ops = &dccp_ipv4_af_ops;
1082 icsk->icsk_sync_mss = dccp_sync_mss;
1083 dp->dccps_mss_cache = 536;
1084 dp->dccps_role = DCCP_ROLE_UNDEFINED;
1085 dp->dccps_service = DCCP_SERVICE_INVALID_VALUE;
1086
1087 return 0;
1088}
1089
1090EXPORT_SYMBOL_GPL(dccp_v4_init_sock);
1091
1092int dccp_v4_destroy_sock(struct sock *sk)
1093{ 1006{
1094 struct dccp_sock *dp = dccp_sk(sk); 1007 static __u8 dccp_v4_ctl_sock_initialized;
1008 int err = dccp_init_sock(sk, dccp_v4_ctl_sock_initialized);
1095 1009
1096 /* 1010 if (err == 0) {
1097 * DCCP doesn't use sk_write_queue, just sk_send_head 1011 if (unlikely(!dccp_v4_ctl_sock_initialized))
1098 * for retransmissions 1012 dccp_v4_ctl_sock_initialized = 1;
1099 */ 1013 inet_csk(sk)->icsk_af_ops = &dccp_ipv4_af_ops;
1100 if (sk->sk_send_head != NULL) {
1101 kfree_skb(sk->sk_send_head);
1102 sk->sk_send_head = NULL;
1103 } 1014 }
1104 1015
1105 /* Clean up a referenced DCCP bind bucket. */ 1016 return err;
1106 if (inet_csk(sk)->icsk_bind_hash != NULL)
1107 inet_put_port(&dccp_hashinfo, sk);
1108
1109 kfree(dp->dccps_service_list);
1110 dp->dccps_service_list = NULL;
1111
1112 ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
1113 ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
1114 if (dp->dccps_options.dccpo_send_ack_vector) {
1115 dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
1116 dp->dccps_hc_rx_ackvec = NULL;
1117 }
1118 ccid_exit(dp->dccps_hc_rx_ccid, sk);
1119 ccid_exit(dp->dccps_hc_tx_ccid, sk);
1120 dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
1121
1122 return 0;
1123} 1017}
1124 1018
1125EXPORT_SYMBOL_GPL(dccp_v4_destroy_sock);
1126
1127static void dccp_v4_reqsk_destructor(struct request_sock *req) 1019static void dccp_v4_reqsk_destructor(struct request_sock *req)
1128{ 1020{
1129 kfree(inet_rsk(req)->opt); 1021 kfree(inet_rsk(req)->opt);
@@ -1142,7 +1034,7 @@ static struct timewait_sock_ops dccp_timewait_sock_ops = {
1142 .twsk_obj_size = sizeof(struct inet_timewait_sock), 1034 .twsk_obj_size = sizeof(struct inet_timewait_sock),
1143}; 1035};
1144 1036
1145struct proto dccp_prot = { 1037static struct proto dccp_v4_prot = {
1146 .name = "DCCP", 1038 .name = "DCCP",
1147 .owner = THIS_MODULE, 1039 .owner = THIS_MODULE,
1148 .close = dccp_close, 1040 .close = dccp_close,
@@ -1155,17 +1047,110 @@ struct proto dccp_prot = {
1155 .sendmsg = dccp_sendmsg, 1047 .sendmsg = dccp_sendmsg,
1156 .recvmsg = dccp_recvmsg, 1048 .recvmsg = dccp_recvmsg,
1157 .backlog_rcv = dccp_v4_do_rcv, 1049 .backlog_rcv = dccp_v4_do_rcv,
1158 .hash = dccp_v4_hash, 1050 .hash = dccp_hash,
1159 .unhash = dccp_unhash, 1051 .unhash = dccp_unhash,
1160 .accept = inet_csk_accept, 1052 .accept = inet_csk_accept,
1161 .get_port = dccp_v4_get_port, 1053 .get_port = dccp_v4_get_port,
1162 .shutdown = dccp_shutdown, 1054 .shutdown = dccp_shutdown,
1163 .destroy = dccp_v4_destroy_sock, 1055 .destroy = dccp_destroy_sock,
1164 .orphan_count = &dccp_orphan_count, 1056 .orphan_count = &dccp_orphan_count,
1165 .max_header = MAX_DCCP_HEADER, 1057 .max_header = MAX_DCCP_HEADER,
1166 .obj_size = sizeof(struct dccp_sock), 1058 .obj_size = sizeof(struct dccp_sock),
1167 .rsk_prot = &dccp_request_sock_ops, 1059 .rsk_prot = &dccp_request_sock_ops,
1168 .twsk_prot = &dccp_timewait_sock_ops, 1060 .twsk_prot = &dccp_timewait_sock_ops,
1061#ifdef CONFIG_COMPAT
1062 .compat_setsockopt = compat_dccp_setsockopt,
1063 .compat_getsockopt = compat_dccp_getsockopt,
1064#endif
1065};
1066
1067static struct net_protocol dccp_v4_protocol = {
1068 .handler = dccp_v4_rcv,
1069 .err_handler = dccp_v4_err,
1070 .no_policy = 1,
1071};
1072
1073static const struct proto_ops inet_dccp_ops = {
1074 .family = PF_INET,
1075 .owner = THIS_MODULE,
1076 .release = inet_release,
1077 .bind = inet_bind,
1078 .connect = inet_stream_connect,
1079 .socketpair = sock_no_socketpair,
1080 .accept = inet_accept,
1081 .getname = inet_getname,
1082 /* FIXME: work on tcp_poll to rename it to inet_csk_poll */
1083 .poll = dccp_poll,
1084 .ioctl = inet_ioctl,
1085 /* FIXME: work on inet_listen to rename it to sock_common_listen */
1086 .listen = inet_dccp_listen,
1087 .shutdown = inet_shutdown,
1088 .setsockopt = sock_common_setsockopt,
1089 .getsockopt = sock_common_getsockopt,
1090 .sendmsg = inet_sendmsg,
1091 .recvmsg = sock_common_recvmsg,
1092 .mmap = sock_no_mmap,
1093 .sendpage = sock_no_sendpage,
1094#ifdef CONFIG_COMPAT
1095 .compat_setsockopt = compat_sock_common_setsockopt,
1096 .compat_getsockopt = compat_sock_common_getsockopt,
1097#endif
1169}; 1098};
1170 1099
1171EXPORT_SYMBOL_GPL(dccp_prot); 1100static struct inet_protosw dccp_v4_protosw = {
1101 .type = SOCK_DCCP,
1102 .protocol = IPPROTO_DCCP,
1103 .prot = &dccp_v4_prot,
1104 .ops = &inet_dccp_ops,
1105 .capability = -1,
1106 .no_check = 0,
1107 .flags = INET_PROTOSW_ICSK,
1108};
1109
1110static int __init dccp_v4_init(void)
1111{
1112 int err = proto_register(&dccp_v4_prot, 1);
1113
1114 if (err != 0)
1115 goto out;
1116
1117 err = inet_add_protocol(&dccp_v4_protocol, IPPROTO_DCCP);
1118 if (err != 0)
1119 goto out_proto_unregister;
1120
1121 inet_register_protosw(&dccp_v4_protosw);
1122
1123 err = inet_csk_ctl_sock_create(&dccp_v4_ctl_socket, PF_INET,
1124 SOCK_DCCP, IPPROTO_DCCP);
1125 if (err)
1126 goto out_unregister_protosw;
1127out:
1128 return err;
1129out_unregister_protosw:
1130 inet_unregister_protosw(&dccp_v4_protosw);
1131 inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP);
1132out_proto_unregister:
1133 proto_unregister(&dccp_v4_prot);
1134 goto out;
1135}
1136
1137static void __exit dccp_v4_exit(void)
1138{
1139 inet_unregister_protosw(&dccp_v4_protosw);
1140 inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP);
1141 proto_unregister(&dccp_v4_prot);
1142}
1143
1144module_init(dccp_v4_init);
1145module_exit(dccp_v4_exit);
1146
1147/*
1148 * __stringify doesn't like enums, so use SOCK_DCCP (6) and IPPROTO_DCCP (33)
1149 * values directly. Also cover the case where the protocol is not specified,
1150 * i.e. net-pf-PF_INET-proto-0-type-SOCK_DCCP
1151 */
1152MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-33-type-6");
1153MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-0-type-6");
1154MODULE_LICENSE("GPL");
1155MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
1156MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");
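
The two MODULE_ALIAS lines are what let a modular kernel demand-load this code the first time user space asks for a DCCP socket. A minimal userspace example of that request; the numeric fallbacks reuse the values quoted in the comment above in case the libc headers predate DCCP:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>

#ifndef SOCK_DCCP
#define SOCK_DCCP	6
#endif
#ifndef IPPROTO_DCCP
#define IPPROTO_DCCP	33
#endif

int main(void)
{
	/* This socket() call is what matches the net-pf-2-proto-33-type-6
	 * alias (PF_INET is 2) and triggers loading of dccp_ipv4. */
	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	printf("DCCP socket created: fd %d\n", fd);
	close(fd);
	return 0;
}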
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 80c4d048869e..65e2ab0886e6 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * DCCP over IPv6 2 * DCCP over IPv6
3 * Linux INET6 implementation 3 * Linux INET6 implementation
4 * 4 *
5 * Based on net/dccp6/ipv6.c 5 * Based on net/dccp6/ipv6.c
6 * 6 *
@@ -33,6 +33,9 @@
33#include "dccp.h" 33#include "dccp.h"
34#include "ipv6.h" 34#include "ipv6.h"
35 35
36/* Socket used for sending RSTs and ACKs */
37static struct socket *dccp_v6_ctl_socket;
38
36static void dccp_v6_ctl_send_reset(struct sk_buff *skb); 39static void dccp_v6_ctl_send_reset(struct sk_buff *skb);
37static void dccp_v6_reqsk_send_ack(struct sk_buff *skb, 40static void dccp_v6_reqsk_send_ack(struct sk_buff *skb,
38 struct request_sock *req); 41 struct request_sock *req);
@@ -53,7 +56,7 @@ static void dccp_v6_hash(struct sock *sk)
53{ 56{
54 if (sk->sk_state != DCCP_CLOSED) { 57 if (sk->sk_state != DCCP_CLOSED) {
55 if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) { 58 if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
56 dccp_prot.hash(sk); 59 dccp_hash(sk);
57 return; 60 return;
58 } 61 }
59 local_bh_disable(); 62 local_bh_disable();
@@ -63,8 +66,8 @@ static void dccp_v6_hash(struct sock *sk)
63} 66}
64 67
65static inline u16 dccp_v6_check(struct dccp_hdr *dh, int len, 68static inline u16 dccp_v6_check(struct dccp_hdr *dh, int len,
66 struct in6_addr *saddr, 69 struct in6_addr *saddr,
67 struct in6_addr *daddr, 70 struct in6_addr *daddr,
68 unsigned long base) 71 unsigned long base)
69{ 72{
70 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_DCCP, base); 73 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_DCCP, base);
@@ -79,17 +82,17 @@ static __u32 dccp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
79 skb->nh.ipv6h->saddr.s6_addr32, 82 skb->nh.ipv6h->saddr.s6_addr32,
80 dh->dccph_dport, 83 dh->dccph_dport,
81 dh->dccph_sport); 84 dh->dccph_sport);
82 else 85
83 return secure_dccp_sequence_number(skb->nh.iph->daddr, 86 return secure_dccp_sequence_number(skb->nh.iph->daddr,
84 skb->nh.iph->saddr, 87 skb->nh.iph->saddr,
85 dh->dccph_dport, 88 dh->dccph_dport,
86 dh->dccph_sport); 89 dh->dccph_sport);
87} 90}
88 91
89static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, 92static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
90 int addr_len) 93 int addr_len)
91{ 94{
92 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; 95 struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
93 struct inet_connection_sock *icsk = inet_csk(sk); 96 struct inet_connection_sock *icsk = inet_csk(sk);
94 struct inet_sock *inet = inet_sk(sk); 97 struct inet_sock *inet = inet_sk(sk);
95 struct ipv6_pinfo *np = inet6_sk(sk); 98 struct ipv6_pinfo *np = inet6_sk(sk);
@@ -102,10 +105,10 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
102 105
103 dp->dccps_role = DCCP_ROLE_CLIENT; 106 dp->dccps_role = DCCP_ROLE_CLIENT;
104 107
105 if (addr_len < SIN6_LEN_RFC2133) 108 if (addr_len < SIN6_LEN_RFC2133)
106 return -EINVAL; 109 return -EINVAL;
107 110
108 if (usin->sin6_family != AF_INET6) 111 if (usin->sin6_family != AF_INET6)
109 return -EAFNOSUPPORT; 112 return -EAFNOSUPPORT;
110 113
111 memset(&fl, 0, sizeof(fl)); 114 memset(&fl, 0, sizeof(fl));
@@ -122,17 +125,15 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
122 fl6_sock_release(flowlabel); 125 fl6_sock_release(flowlabel);
123 } 126 }
124 } 127 }
125
126 /* 128 /*
127 * connect() to INADDR_ANY means loopback (BSD'ism). 129 * connect() to INADDR_ANY means loopback (BSD'ism).
128 */ 130 */
129 131 if (ipv6_addr_any(&usin->sin6_addr))
130 if (ipv6_addr_any(&usin->sin6_addr)) 132 usin->sin6_addr.s6_addr[15] = 1;
131 usin->sin6_addr.s6_addr[15] = 0x1;
132 133
133 addr_type = ipv6_addr_type(&usin->sin6_addr); 134 addr_type = ipv6_addr_type(&usin->sin6_addr);
134 135
135 if(addr_type & IPV6_ADDR_MULTICAST) 136 if (addr_type & IPV6_ADDR_MULTICAST)
136 return -ENETUNREACH; 137 return -ENETUNREACH;
137 138
138 if (addr_type & IPV6_ADDR_LINKLOCAL) { 139 if (addr_type & IPV6_ADDR_LINKLOCAL) {
@@ -157,9 +158,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
157 np->flow_label = fl.fl6_flowlabel; 158 np->flow_label = fl.fl6_flowlabel;
158 159
159 /* 160 /*
160 * DCCP over IPv4 161 * DCCP over IPv4
161 */ 162 */
162
163 if (addr_type == IPV6_ADDR_MAPPED) { 163 if (addr_type == IPV6_ADDR_MAPPED) {
164 u32 exthdrlen = icsk->icsk_ext_hdr_len; 164 u32 exthdrlen = icsk->icsk_ext_hdr_len;
165 struct sockaddr_in sin; 165 struct sockaddr_in sin;
@@ -177,7 +177,6 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
177 sk->sk_backlog_rcv = dccp_v4_do_rcv; 177 sk->sk_backlog_rcv = dccp_v4_do_rcv;
178 178
179 err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin)); 179 err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
180
181 if (err) { 180 if (err) {
182 icsk->icsk_ext_hdr_len = exthdrlen; 181 icsk->icsk_ext_hdr_len = exthdrlen;
183 icsk->icsk_af_ops = &dccp_ipv6_af_ops; 182 icsk->icsk_af_ops = &dccp_ipv6_af_ops;
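
The hunks above cover the v4-mapped branch of dccp_v6_connect(): the destination is rewritten into a sockaddr_in, icsk_af_ops is switched to dccp_ipv6_mapped, the work is handed to dccp_v4_connect(), and the IPv6 ops are restored on failure. A userspace sketch of a connect() that exercises this path; the address and port are placeholders:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef SOCK_DCCP
#define SOCK_DCCP	6
#endif
#ifndef IPPROTO_DCCP
#define IPPROTO_DCCP	33
#endif

int main(void)
{
	struct sockaddr_in6 dst;
	int fd = socket(AF_INET6, SOCK_DCCP, IPPROTO_DCCP);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&dst, 0, sizeof(dst));
	dst.sin6_family = AF_INET6;
	dst.sin6_port   = htons(5001);			/* example port */
	/* v4-mapped destination: the kernel falls back to the IPv4 code. */
	inet_pton(AF_INET6, "::ffff:192.0.2.1", &dst.sin6_addr);

	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("connect");
	close(fd);
	return 0;
}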
@@ -203,8 +202,9 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
203 fl.fl_ip_dport = usin->sin6_port; 202 fl.fl_ip_dport = usin->sin6_port;
204 fl.fl_ip_sport = inet->sport; 203 fl.fl_ip_sport = inet->sport;
205 204
206 if (np->opt && np->opt->srcrt) { 205 if (np->opt != NULL && np->opt->srcrt != NULL) {
207 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt; 206 const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
207
208 ipv6_addr_copy(&final, &fl.fl6_dst); 208 ipv6_addr_copy(&final, &fl.fl6_dst);
209 ipv6_addr_copy(&fl.fl6_dst, rt0->addr); 209 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
210 final_p = &final; 210 final_p = &final;
@@ -213,10 +213,12 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
213 err = ip6_dst_lookup(sk, &dst, &fl); 213 err = ip6_dst_lookup(sk, &dst, &fl);
214 if (err) 214 if (err)
215 goto failure; 215 goto failure;
216
216 if (final_p) 217 if (final_p)
217 ipv6_addr_copy(&fl.fl6_dst, final_p); 218 ipv6_addr_copy(&fl.fl6_dst, final_p);
218 219
219 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) 220 err = xfrm_lookup(&dst, &fl, sk, 0);
221 if (err < 0)
220 goto failure; 222 goto failure;
221 223
222 if (saddr == NULL) { 224 if (saddr == NULL) {
@@ -231,7 +233,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
231 ip6_dst_store(sk, dst, NULL); 233 ip6_dst_store(sk, dst, NULL);
232 234
233 icsk->icsk_ext_hdr_len = 0; 235 icsk->icsk_ext_hdr_len = 0;
234 if (np->opt) 236 if (np->opt != NULL)
235 icsk->icsk_ext_hdr_len = (np->opt->opt_flen + 237 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
236 np->opt->opt_nflen); 238 np->opt->opt_nflen);
237 239
@@ -264,7 +266,7 @@ failure:
264} 266}
265 267
266static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 268static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
267 int type, int code, int offset, __u32 info) 269 int type, int code, int offset, __be32 info)
268{ 270{
269 struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data; 271 struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
270 const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); 272 const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
@@ -305,7 +307,6 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
305 307
306 /* icmp should have updated the destination cache entry */ 308 /* icmp should have updated the destination cache entry */
307 dst = __sk_dst_check(sk, np->dst_cookie); 309 dst = __sk_dst_check(sk, np->dst_cookie);
308
309 if (dst == NULL) { 310 if (dst == NULL) {
310 struct inet_sock *inet = inet_sk(sk); 311 struct inet_sock *inet = inet_sk(sk);
311 struct flowi fl; 312 struct flowi fl;
@@ -322,16 +323,17 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
322 fl.fl_ip_dport = inet->dport; 323 fl.fl_ip_dport = inet->dport;
323 fl.fl_ip_sport = inet->sport; 324 fl.fl_ip_sport = inet->sport;
324 325
325 if ((err = ip6_dst_lookup(sk, &dst, &fl))) { 326 err = ip6_dst_lookup(sk, &dst, &fl);
327 if (err) {
326 sk->sk_err_soft = -err; 328 sk->sk_err_soft = -err;
327 goto out; 329 goto out;
328 } 330 }
329 331
330 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) { 332 err = xfrm_lookup(&dst, &fl, sk, 0);
333 if (err < 0) {
331 sk->sk_err_soft = -err; 334 sk->sk_err_soft = -err;
332 goto out; 335 goto out;
333 } 336 }
334
335 } else 337 } else
336 dst_hold(dst); 338 dst_hold(dst);
337 339
@@ -355,11 +357,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
355 req = inet6_csk_search_req(sk, &prev, dh->dccph_dport, 357 req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
356 &hdr->daddr, &hdr->saddr, 358 &hdr->daddr, &hdr->saddr,
357 inet6_iif(skb)); 359 inet6_iif(skb));
358 if (!req) 360 if (req == NULL)
359 goto out; 361 goto out;
360 362
361 /* ICMPs are not backlogged, hence we cannot get 363 /*
362 * an established socket here. 364 * ICMPs are not backlogged, hence we cannot get an established
365 * socket here.
363 */ 366 */
364 BUG_TRAP(req->sk == NULL); 367 BUG_TRAP(req->sk == NULL);
365 368
@@ -373,7 +376,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
373 376
374 case DCCP_REQUESTING: 377 case DCCP_REQUESTING:
375 case DCCP_RESPOND: /* Cannot happen. 378 case DCCP_RESPOND: /* Cannot happen.
376 It can, it SYNs are crossed. --ANK */ 379 It can, it SYNs are crossed. --ANK */
377 if (!sock_owned_by_user(sk)) { 380 if (!sock_owned_by_user(sk)) {
378 DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS); 381 DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
379 sk->sk_err = err; 382 sk->sk_err = err;
@@ -382,7 +385,6 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
382 * (see connect in sock.c) 385 * (see connect in sock.c)
383 */ 386 */
384 sk->sk_error_report(sk); 387 sk->sk_error_report(sk);
385
386 dccp_done(sk); 388 dccp_done(sk);
387 } else 389 } else
388 sk->sk_err_soft = err; 390 sk->sk_err_soft = err;
@@ -428,14 +430,16 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
428 ireq6->pktopts) { 430 ireq6->pktopts) {
429 struct sk_buff *pktopts = ireq6->pktopts; 431 struct sk_buff *pktopts = ireq6->pktopts;
430 struct inet6_skb_parm *rxopt = IP6CB(pktopts); 432 struct inet6_skb_parm *rxopt = IP6CB(pktopts);
433
431 if (rxopt->srcrt) 434 if (rxopt->srcrt)
432 opt = ipv6_invert_rthdr(sk, 435 opt = ipv6_invert_rthdr(sk,
433 (struct ipv6_rt_hdr *)(pktopts->nh.raw + 436 (struct ipv6_rt_hdr *)(pktopts->nh.raw +
434 rxopt->srcrt)); 437 rxopt->srcrt));
435 } 438 }
436 439
437 if (opt && opt->srcrt) { 440 if (opt != NULL && opt->srcrt != NULL) {
438 struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt; 441 const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
442
439 ipv6_addr_copy(&final, &fl.fl6_dst); 443 ipv6_addr_copy(&final, &fl.fl6_dst);
440 ipv6_addr_copy(&fl.fl6_dst, rt0->addr); 444 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
441 final_p = &final; 445 final_p = &final;
@@ -444,15 +448,19 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
444 err = ip6_dst_lookup(sk, &dst, &fl); 448 err = ip6_dst_lookup(sk, &dst, &fl);
445 if (err) 449 if (err)
446 goto done; 450 goto done;
451
447 if (final_p) 452 if (final_p)
448 ipv6_addr_copy(&fl.fl6_dst, final_p); 453 ipv6_addr_copy(&fl.fl6_dst, final_p);
449 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) 454
455 err = xfrm_lookup(&dst, &fl, sk, 0);
456 if (err < 0)
450 goto done; 457 goto done;
451 } 458 }
452 459
453 skb = dccp_make_response(sk, dst, req); 460 skb = dccp_make_response(sk, dst, req);
454 if (skb != NULL) { 461 if (skb != NULL) {
455 struct dccp_hdr *dh = dccp_hdr(skb); 462 struct dccp_hdr *dh = dccp_hdr(skb);
463
456 dh->dccph_checksum = dccp_v6_check(dh, skb->len, 464 dh->dccph_checksum = dccp_v6_check(dh, skb->len,
457 &ireq6->loc_addr, 465 &ireq6->loc_addr,
458 &ireq6->rmt_addr, 466 &ireq6->rmt_addr,
@@ -466,7 +474,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
466 } 474 }
467 475
468done: 476done:
469 if (opt && opt != np->opt) 477 if (opt != NULL && opt != np->opt)
470 sock_kfree_s(sk, opt, opt->tot_len); 478 sock_kfree_s(sk, opt, opt->tot_len);
471 dst_release(dst); 479 dst_release(dst);
472 return err; 480 return err;
@@ -497,7 +505,7 @@ static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
497 struct dccp_hdr *dh = dccp_hdr(skb); 505 struct dccp_hdr *dh = dccp_hdr(skb);
498 506
499 dh->dccph_checksum = csum_ipv6_magic(&np->saddr, &np->daddr, 507 dh->dccph_checksum = csum_ipv6_magic(&np->saddr, &np->daddr,
500 len, IPPROTO_DCCP, 508 len, IPPROTO_DCCP,
501 csum_partial((char *)dh, 509 csum_partial((char *)dh,
502 dh->dccph_doff << 2, 510 dh->dccph_doff << 2,
503 skb->csum)); 511 skb->csum));
@@ -505,8 +513,8 @@ static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
505 513
506static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb) 514static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb)
507{ 515{
508 struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh; 516 struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
509 const int dccp_hdr_reset_len = sizeof(struct dccp_hdr) + 517 const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
510 sizeof(struct dccp_hdr_ext) + 518 sizeof(struct dccp_hdr_ext) +
511 sizeof(struct dccp_hdr_reset); 519 sizeof(struct dccp_hdr_reset);
512 struct sk_buff *skb; 520 struct sk_buff *skb;
@@ -517,20 +525,14 @@ static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb)
517 return; 525 return;
518 526
519 if (!ipv6_unicast_destination(rxskb)) 527 if (!ipv6_unicast_destination(rxskb))
520 return; 528 return;
521
522 /*
523 * We need to grab some memory, and put together an RST,
524 * and then put it into the queue to be sent.
525 */
526 529
527 skb = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + 530 skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header,
528 dccp_hdr_reset_len, GFP_ATOMIC); 531 GFP_ATOMIC);
529 if (skb == NULL) 532 if (skb == NULL)
530 return; 533 return;
531 534
532 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr) + 535 skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header);
533 dccp_hdr_reset_len);
534 536
535 skb->h.raw = skb_push(skb, dccp_hdr_reset_len); 537 skb->h.raw = skb_push(skb, dccp_hdr_reset_len);
536 dh = dccp_hdr(skb); 538 dh = dccp_hdr(skb);
@@ -568,7 +570,7 @@ static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb)
568 /* sk = NULL, but it is safe for now. RST socket required. */ 570 /* sk = NULL, but it is safe for now. RST socket required. */
569 if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) { 571 if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
570 if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) { 572 if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
571 ip6_xmit(NULL, skb, &fl, NULL, 0); 573 ip6_xmit(dccp_v6_ctl_socket->sk, skb, &fl, NULL, 0);
572 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); 574 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
573 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); 575 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
574 return; 576 return;
@@ -578,22 +580,22 @@ static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb)
578 kfree_skb(skb); 580 kfree_skb(skb);
579} 581}
580 582
581static void dccp_v6_ctl_send_ack(struct sk_buff *rxskb) 583static void dccp_v6_reqsk_send_ack(struct sk_buff *rxskb,
584 struct request_sock *req)
582{ 585{
583 struct flowi fl; 586 struct flowi fl;
584 struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh; 587 struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
585 const int dccp_hdr_ack_len = sizeof(struct dccp_hdr) + 588 const u32 dccp_hdr_ack_len = sizeof(struct dccp_hdr) +
586 sizeof(struct dccp_hdr_ext) + 589 sizeof(struct dccp_hdr_ext) +
587 sizeof(struct dccp_hdr_ack_bits); 590 sizeof(struct dccp_hdr_ack_bits);
588 struct sk_buff *skb; 591 struct sk_buff *skb;
589 592
590 skb = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + 593 skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header,
591 dccp_hdr_ack_len, GFP_ATOMIC); 594 GFP_ATOMIC);
592 if (skb == NULL) 595 if (skb == NULL)
593 return; 596 return;
594 597
595 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr) + 598 skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header);
596 dccp_hdr_ack_len);
597 599
598 skb->h.raw = skb_push(skb, dccp_hdr_ack_len); 600 skb->h.raw = skb_push(skb, dccp_hdr_ack_len);
599 dh = dccp_hdr(skb); 601 dh = dccp_hdr(skb);
@@ -605,7 +607,7 @@ static void dccp_v6_ctl_send_ack(struct sk_buff *rxskb)
605 dh->dccph_dport = rxdh->dccph_sport; 607 dh->dccph_dport = rxdh->dccph_sport;
606 dh->dccph_doff = dccp_hdr_ack_len / 4; 608 dh->dccph_doff = dccp_hdr_ack_len / 4;
607 dh->dccph_x = 1; 609 dh->dccph_x = 1;
608 610
609 dccp_hdr_set_seq(dh, DCCP_SKB_CB(rxskb)->dccpd_ack_seq); 611 dccp_hdr_set_seq(dh, DCCP_SKB_CB(rxskb)->dccpd_ack_seq);
610 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), 612 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
611 DCCP_SKB_CB(rxskb)->dccpd_seq); 613 DCCP_SKB_CB(rxskb)->dccpd_seq);
@@ -623,7 +625,7 @@ static void dccp_v6_ctl_send_ack(struct sk_buff *rxskb)
623 625
624 if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) { 626 if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
625 if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) { 627 if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
626 ip6_xmit(NULL, skb, &fl, NULL, 0); 628 ip6_xmit(dccp_v6_ctl_socket->sk, skb, &fl, NULL, 0);
627 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); 629 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
628 return; 630 return;
629 } 631 }
@@ -632,12 +634,6 @@ static void dccp_v6_ctl_send_ack(struct sk_buff *rxskb)
632 kfree_skb(skb); 634 kfree_skb(skb);
633} 635}
634 636
635static void dccp_v6_reqsk_send_ack(struct sk_buff *skb,
636 struct request_sock *req)
637{
638 dccp_v6_ctl_send_ack(skb);
639}
640
641static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) 637static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
642{ 638{
643 const struct dccp_hdr *dh = dccp_hdr(skb); 639 const struct dccp_hdr *dh = dccp_hdr(skb);
@@ -657,7 +653,6 @@ static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
657 &iph->saddr, dh->dccph_sport, 653 &iph->saddr, dh->dccph_sport,
658 &iph->daddr, ntohs(dh->dccph_dport), 654 &iph->daddr, ntohs(dh->dccph_dport),
659 inet6_iif(skb)); 655 inet6_iif(skb));
660
661 if (nsk != NULL) { 656 if (nsk != NULL) {
662 if (nsk->sk_state != DCCP_TIME_WAIT) { 657 if (nsk->sk_state != DCCP_TIME_WAIT) {
663 bh_lock_sock(nsk); 658 bh_lock_sock(nsk);
@@ -678,7 +673,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
678 struct dccp_request_sock *dreq; 673 struct dccp_request_sock *dreq;
679 struct inet6_request_sock *ireq6; 674 struct inet6_request_sock *ireq6;
680 struct ipv6_pinfo *np = inet6_sk(sk); 675 struct ipv6_pinfo *np = inet6_sk(sk);
681 const __u32 service = dccp_hdr_request(skb)->dccph_req_service; 676 const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
682 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); 677 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
683 __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY; 678 __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;
684 679
@@ -686,17 +681,17 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
686 return dccp_v4_conn_request(sk, skb); 681 return dccp_v4_conn_request(sk, skb);
687 682
688 if (!ipv6_unicast_destination(skb)) 683 if (!ipv6_unicast_destination(skb))
689 goto drop; 684 goto drop;
690 685
691 if (dccp_bad_service_code(sk, service)) { 686 if (dccp_bad_service_code(sk, service)) {
692 reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE; 687 reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
693 goto drop; 688 goto drop;
694 } 689 }
695 /* 690 /*
696 * There are no SYN attacks on IPv6, yet... 691 * There are no SYN attacks on IPv6, yet...
697 */ 692 */
698 if (inet_csk_reqsk_queue_is_full(sk)) 693 if (inet_csk_reqsk_queue_is_full(sk))
699 goto drop; 694 goto drop;
700 695
701 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) 696 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
702 goto drop; 697 goto drop;
@@ -730,7 +725,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
730 ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL) 725 ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
731 ireq6->iif = inet6_iif(skb); 726 ireq6->iif = inet6_iif(skb);
732 727
733 /* 728 /*
734 * Step 3: Process LISTEN state 729 * Step 3: Process LISTEN state
735 * 730 *
736 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie 731 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
@@ -774,9 +769,8 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
774 /* 769 /*
775 * v6 mapped 770 * v6 mapped
776 */ 771 */
777
778 newsk = dccp_v4_request_recv_sock(sk, skb, req, dst); 772 newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
779 if (newsk == NULL) 773 if (newsk == NULL)
780 return NULL; 774 return NULL;
781 775
782 newdp6 = (struct dccp6_sock *)newsk; 776 newdp6 = (struct dccp6_sock *)newsk;
@@ -822,9 +816,9 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
822 if (sk_acceptq_is_full(sk)) 816 if (sk_acceptq_is_full(sk))
823 goto out_overflow; 817 goto out_overflow;
824 818
825 if (np->rxopt.bits.osrcrt == 2 && 819 if (np->rxopt.bits.osrcrt == 2 && opt == NULL && ireq6->pktopts) {
826 opt == NULL && ireq6->pktopts) { 820 const struct inet6_skb_parm *rxopt = IP6CB(ireq6->pktopts);
827 struct inet6_skb_parm *rxopt = IP6CB(ireq6->pktopts); 821
828 if (rxopt->srcrt) 822 if (rxopt->srcrt)
829 opt = ipv6_invert_rthdr(sk, 823 opt = ipv6_invert_rthdr(sk,
830 (struct ipv6_rt_hdr *)(ireq6->pktopts->nh.raw + 824 (struct ipv6_rt_hdr *)(ireq6->pktopts->nh.raw +
@@ -838,8 +832,9 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
838 memset(&fl, 0, sizeof(fl)); 832 memset(&fl, 0, sizeof(fl));
839 fl.proto = IPPROTO_DCCP; 833 fl.proto = IPPROTO_DCCP;
840 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); 834 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
841 if (opt && opt->srcrt) { 835 if (opt != NULL && opt->srcrt != NULL) {
842 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt; 836 const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
837
843 ipv6_addr_copy(&final, &fl.fl6_dst); 838 ipv6_addr_copy(&final, &fl.fl6_dst);
844 ipv6_addr_copy(&fl.fl6_dst, rt0->addr); 839 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
845 final_p = &final; 840 final_p = &final;
@@ -857,7 +852,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
857 852
858 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0) 853 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
859 goto out; 854 goto out;
860 } 855 }
861 856
862 newsk = dccp_create_openreq_child(sk, req, skb); 857 newsk = dccp_create_openreq_child(sk, req, skb);
863 if (newsk == NULL) 858 if (newsk == NULL)
@@ -870,9 +865,8 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
870 */ 865 */
871 866
872 ip6_dst_store(newsk, dst, NULL); 867 ip6_dst_store(newsk, dst, NULL);
873 newsk->sk_route_caps = dst->dev->features & 868 newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
874 ~(NETIF_F_IP_CSUM | NETIF_F_TSO); 869 NETIF_F_TSO);
875
876 newdp6 = (struct dccp6_sock *)newsk; 870 newdp6 = (struct dccp6_sock *)newsk;
877 newinet = inet_sk(newsk); 871 newinet = inet_sk(newsk);
878 newinet->pinet6 = &newdp6->inet6; 872 newinet->pinet6 = &newdp6->inet6;
@@ -886,7 +880,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
886 ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr); 880 ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
887 newsk->sk_bound_dev_if = ireq6->iif; 881 newsk->sk_bound_dev_if = ireq6->iif;
888 882
889 /* Now IPv6 options... 883 /* Now IPv6 options...
890 884
891 First: no IPv4 options. 885 First: no IPv4 options.
892 */ 886 */
@@ -908,20 +902,20 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
908 newnp->mcast_oif = inet6_iif(skb); 902 newnp->mcast_oif = inet6_iif(skb);
909 newnp->mcast_hops = skb->nh.ipv6h->hop_limit; 903 newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
910 904
911 /* Clone native IPv6 options from listening socket (if any) 905 /*
912 906 * Clone native IPv6 options from listening socket (if any)
913 Yes, keeping reference count would be much more clever, 907 *
914 but we do one more thing there: reattach optmem 908 * Yes, keeping reference count would be much more clever, but we do
915 to newsk. 909 * one more thing there: reattach optmem to newsk.
916 */ 910 */
917 if (opt) { 911 if (opt != NULL) {
918 newnp->opt = ipv6_dup_options(newsk, opt); 912 newnp->opt = ipv6_dup_options(newsk, opt);
919 if (opt != np->opt) 913 if (opt != np->opt)
920 sock_kfree_s(sk, opt, opt->tot_len); 914 sock_kfree_s(sk, opt, opt->tot_len);
921 } 915 }
922 916
923 inet_csk(newsk)->icsk_ext_hdr_len = 0; 917 inet_csk(newsk)->icsk_ext_hdr_len = 0;
924 if (newnp->opt) 918 if (newnp->opt != NULL)
925 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen + 919 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
926 newnp->opt->opt_flen); 920 newnp->opt->opt_flen);
927 921
@@ -938,7 +932,7 @@ out_overflow:
938 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS); 932 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
939out: 933out:
940 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS); 934 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
941 if (opt && opt != np->opt) 935 if (opt != NULL && opt != np->opt)
942 sock_kfree_s(sk, opt, opt->tot_len); 936 sock_kfree_s(sk, opt, opt->tot_len);
943 dst_release(dst); 937 dst_release(dst);
944 return NULL; 938 return NULL;
@@ -972,8 +966,8 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
972 goto discard; 966 goto discard;
973 967
974 /* 968 /*
975 * socket locking is here for SMP purposes as backlog rcv 969 * socket locking is here for SMP purposes as backlog rcv is currently
976 * is currently called with bh processing disabled. 970 * called with bh processing disabled.
977 */ 971 */
978 972
979 /* Do Stevens' IPV6_PKTOPTIONS. 973 /* Do Stevens' IPV6_PKTOPTIONS.
@@ -998,20 +992,20 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
998 return 0; 992 return 0;
999 } 993 }
1000 994
1001 if (sk->sk_state == DCCP_LISTEN) { 995 if (sk->sk_state == DCCP_LISTEN) {
1002 struct sock *nsk = dccp_v6_hnd_req(sk, skb); 996 struct sock *nsk = dccp_v6_hnd_req(sk, skb);
1003 if (!nsk)
1004 goto discard;
1005 997
998 if (nsk == NULL)
999 goto discard;
1006 /* 1000 /*
1007 * Queue it on the new socket if the new socket is active, 1001 * Queue it on the new socket if the new socket is active,
1008 * otherwise we just shortcircuit this and continue with 1002 * otherwise we just shortcircuit this and continue with
1009 * the new socket.. 1003 * the new socket..
1010 */ 1004 */
1011 if(nsk != sk) { 1005 if (nsk != sk) {
1012 if (dccp_child_process(sk, nsk, skb)) 1006 if (dccp_child_process(sk, nsk, skb))
1013 goto reset; 1007 goto reset;
1014 if (opt_skb) 1008 if (opt_skb != NULL)
1015 __kfree_skb(opt_skb); 1009 __kfree_skb(opt_skb);
1016 return 0; 1010 return 0;
1017 } 1011 }
@@ -1024,7 +1018,7 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1024reset: 1018reset:
1025 dccp_v6_ctl_send_reset(skb); 1019 dccp_v6_ctl_send_reset(skb);
1026discard: 1020discard:
1027 if (opt_skb) 1021 if (opt_skb != NULL)
1028 __kfree_skb(opt_skb); 1022 __kfree_skb(opt_skb);
1029 kfree_skb(skb); 1023 kfree_skb(skb);
1030 return 0; 1024 return 0;
@@ -1057,7 +1051,7 @@ static int dccp_v6_rcv(struct sk_buff **pskb)
1057 dh->dccph_sport, 1051 dh->dccph_sport,
1058 &skb->nh.ipv6h->daddr, ntohs(dh->dccph_dport), 1052 &skb->nh.ipv6h->daddr, ntohs(dh->dccph_dport),
1059 inet6_iif(skb)); 1053 inet6_iif(skb));
1060 /* 1054 /*
1061 * Step 2: 1055 * Step 2:
1062 * If no socket ... 1056 * If no socket ...
1063 * Generate Reset(No Connection) unless P.type == Reset 1057 * Generate Reset(No Connection) unless P.type == Reset
@@ -1066,15 +1060,14 @@ static int dccp_v6_rcv(struct sk_buff **pskb)
1066 if (sk == NULL) 1060 if (sk == NULL)
1067 goto no_dccp_socket; 1061 goto no_dccp_socket;
1068 1062
1069 /* 1063 /*
1070 * Step 2: 1064 * Step 2:
1071 * ... or S.state == TIMEWAIT, 1065 * ... or S.state == TIMEWAIT,
1072 * Generate Reset(No Connection) unless P.type == Reset 1066 * Generate Reset(No Connection) unless P.type == Reset
1073 * Drop packet and return 1067 * Drop packet and return
1074 */ 1068 */
1075
1076 if (sk->sk_state == DCCP_TIME_WAIT) 1069 if (sk->sk_state == DCCP_TIME_WAIT)
1077 goto do_time_wait; 1070 goto do_time_wait;
1078 1071
1079 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 1072 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1080 goto discard_and_relse; 1073 goto discard_and_relse;
@@ -1113,32 +1106,40 @@ do_time_wait:
1113} 1106}
1114 1107
1115static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = { 1108static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
1116 .queue_xmit = inet6_csk_xmit, 1109 .queue_xmit = inet6_csk_xmit,
1117 .send_check = dccp_v6_send_check, 1110 .send_check = dccp_v6_send_check,
1118 .rebuild_header = inet6_sk_rebuild_header, 1111 .rebuild_header = inet6_sk_rebuild_header,
1119 .conn_request = dccp_v6_conn_request, 1112 .conn_request = dccp_v6_conn_request,
1120 .syn_recv_sock = dccp_v6_request_recv_sock, 1113 .syn_recv_sock = dccp_v6_request_recv_sock,
1121 .net_header_len = sizeof(struct ipv6hdr), 1114 .net_header_len = sizeof(struct ipv6hdr),
1122 .setsockopt = ipv6_setsockopt, 1115 .setsockopt = ipv6_setsockopt,
1123 .getsockopt = ipv6_getsockopt, 1116 .getsockopt = ipv6_getsockopt,
1124 .addr2sockaddr = inet6_csk_addr2sockaddr, 1117 .addr2sockaddr = inet6_csk_addr2sockaddr,
1125 .sockaddr_len = sizeof(struct sockaddr_in6) 1118 .sockaddr_len = sizeof(struct sockaddr_in6),
1119#ifdef CONFIG_COMPAT
1120 .compat_setsockopt = compat_ipv6_setsockopt,
1121 .compat_getsockopt = compat_ipv6_getsockopt,
1122#endif
1126}; 1123};
1127 1124
1128/* 1125/*
1129 * DCCP over IPv4 via INET6 API 1126 * DCCP over IPv4 via INET6 API
1130 */ 1127 */
1131static struct inet_connection_sock_af_ops dccp_ipv6_mapped = { 1128static struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
1132 .queue_xmit = ip_queue_xmit, 1129 .queue_xmit = ip_queue_xmit,
1133 .send_check = dccp_v4_send_check, 1130 .send_check = dccp_v4_send_check,
1134 .rebuild_header = inet_sk_rebuild_header, 1131 .rebuild_header = inet_sk_rebuild_header,
1135 .conn_request = dccp_v6_conn_request, 1132 .conn_request = dccp_v6_conn_request,
1136 .syn_recv_sock = dccp_v6_request_recv_sock, 1133 .syn_recv_sock = dccp_v6_request_recv_sock,
1137 .net_header_len = sizeof(struct iphdr), 1134 .net_header_len = sizeof(struct iphdr),
1138 .setsockopt = ipv6_setsockopt, 1135 .setsockopt = ipv6_setsockopt,
1139 .getsockopt = ipv6_getsockopt, 1136 .getsockopt = ipv6_getsockopt,
1140 .addr2sockaddr = inet6_csk_addr2sockaddr, 1137 .addr2sockaddr = inet6_csk_addr2sockaddr,
1141 .sockaddr_len = sizeof(struct sockaddr_in6) 1138 .sockaddr_len = sizeof(struct sockaddr_in6),
1139#ifdef CONFIG_COMPAT
1140 .compat_setsockopt = compat_ipv6_setsockopt,
1141 .compat_getsockopt = compat_ipv6_getsockopt,
1142#endif
1142}; 1143};
1143 1144
1144/* NOTE: A lot of things set to zero explicitly by call to 1145/* NOTE: A lot of things set to zero explicitly by call to
@@ -1146,71 +1147,83 @@ static struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
1146 */ 1147 */
1147static int dccp_v6_init_sock(struct sock *sk) 1148static int dccp_v6_init_sock(struct sock *sk)
1148{ 1149{
1149 int err = dccp_v4_init_sock(sk); 1150 static __u8 dccp_v6_ctl_sock_initialized;
1151 int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);
1150 1152
1151 if (err == 0) 1153 if (err == 0) {
1154 if (unlikely(!dccp_v6_ctl_sock_initialized))
1155 dccp_v6_ctl_sock_initialized = 1;
1152 inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops; 1156 inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
1157 }
1153 1158
1154 return err; 1159 return err;
1155} 1160}
1156 1161
1157static int dccp_v6_destroy_sock(struct sock *sk) 1162static int dccp_v6_destroy_sock(struct sock *sk)
1158{ 1163{
1159 dccp_v4_destroy_sock(sk); 1164 dccp_destroy_sock(sk);
1160 return inet6_destroy_sock(sk); 1165 return inet6_destroy_sock(sk);
1161} 1166}
1162 1167
1163static struct proto dccp_v6_prot = { 1168static struct proto dccp_v6_prot = {
1164 .name = "DCCPv6", 1169 .name = "DCCPv6",
1165 .owner = THIS_MODULE, 1170 .owner = THIS_MODULE,
1166 .close = dccp_close, 1171 .close = dccp_close,
1167 .connect = dccp_v6_connect, 1172 .connect = dccp_v6_connect,
1168 .disconnect = dccp_disconnect, 1173 .disconnect = dccp_disconnect,
1169 .ioctl = dccp_ioctl, 1174 .ioctl = dccp_ioctl,
1170 .init = dccp_v6_init_sock, 1175 .init = dccp_v6_init_sock,
1171 .setsockopt = dccp_setsockopt, 1176 .setsockopt = dccp_setsockopt,
1172 .getsockopt = dccp_getsockopt, 1177 .getsockopt = dccp_getsockopt,
1173 .sendmsg = dccp_sendmsg, 1178 .sendmsg = dccp_sendmsg,
1174 .recvmsg = dccp_recvmsg, 1179 .recvmsg = dccp_recvmsg,
1175 .backlog_rcv = dccp_v6_do_rcv, 1180 .backlog_rcv = dccp_v6_do_rcv,
1176 .hash = dccp_v6_hash, 1181 .hash = dccp_v6_hash,
1177 .unhash = dccp_unhash, 1182 .unhash = dccp_unhash,
1178 .accept = inet_csk_accept, 1183 .accept = inet_csk_accept,
1179 .get_port = dccp_v6_get_port, 1184 .get_port = dccp_v6_get_port,
1180 .shutdown = dccp_shutdown, 1185 .shutdown = dccp_shutdown,
1181 .destroy = dccp_v6_destroy_sock, 1186 .destroy = dccp_v6_destroy_sock,
1182 .orphan_count = &dccp_orphan_count, 1187 .orphan_count = &dccp_orphan_count,
1183 .max_header = MAX_DCCP_HEADER, 1188 .max_header = MAX_DCCP_HEADER,
1184 .obj_size = sizeof(struct dccp6_sock), 1189 .obj_size = sizeof(struct dccp6_sock),
1185 .rsk_prot = &dccp6_request_sock_ops, 1190 .rsk_prot = &dccp6_request_sock_ops,
1186 .twsk_prot = &dccp6_timewait_sock_ops, 1191 .twsk_prot = &dccp6_timewait_sock_ops,
1192#ifdef CONFIG_COMPAT
1193 .compat_setsockopt = compat_dccp_setsockopt,
1194 .compat_getsockopt = compat_dccp_getsockopt,
1195#endif
1187}; 1196};
1188 1197
1189static struct inet6_protocol dccp_v6_protocol = { 1198static struct inet6_protocol dccp_v6_protocol = {
1190 .handler = dccp_v6_rcv, 1199 .handler = dccp_v6_rcv,
1191 .err_handler = dccp_v6_err, 1200 .err_handler = dccp_v6_err,
1192 .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL, 1201 .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
1193}; 1202};
1194 1203
1195static struct proto_ops inet6_dccp_ops = { 1204static struct proto_ops inet6_dccp_ops = {
1196 .family = PF_INET6, 1205 .family = PF_INET6,
1197 .owner = THIS_MODULE, 1206 .owner = THIS_MODULE,
1198 .release = inet6_release, 1207 .release = inet6_release,
1199 .bind = inet6_bind, 1208 .bind = inet6_bind,
1200 .connect = inet_stream_connect, 1209 .connect = inet_stream_connect,
1201 .socketpair = sock_no_socketpair, 1210 .socketpair = sock_no_socketpair,
1202 .accept = inet_accept, 1211 .accept = inet_accept,
1203 .getname = inet6_getname, 1212 .getname = inet6_getname,
1204 .poll = dccp_poll, 1213 .poll = dccp_poll,
1205 .ioctl = inet6_ioctl, 1214 .ioctl = inet6_ioctl,
1206 .listen = inet_dccp_listen, 1215 .listen = inet_dccp_listen,
1207 .shutdown = inet_shutdown, 1216 .shutdown = inet_shutdown,
1208 .setsockopt = sock_common_setsockopt, 1217 .setsockopt = sock_common_setsockopt,
1209 .getsockopt = sock_common_getsockopt, 1218 .getsockopt = sock_common_getsockopt,
1210 .sendmsg = inet_sendmsg, 1219 .sendmsg = inet_sendmsg,
1211 .recvmsg = sock_common_recvmsg, 1220 .recvmsg = sock_common_recvmsg,
1212 .mmap = sock_no_mmap, 1221 .mmap = sock_no_mmap,
1213 .sendpage = sock_no_sendpage, 1222 .sendpage = sock_no_sendpage,
1223#ifdef CONFIG_COMPAT
1224 .compat_setsockopt = compat_sock_common_setsockopt,
1225 .compat_getsockopt = compat_sock_common_getsockopt,
1226#endif
1214}; 1227};
1215 1228
1216static struct inet_protosw dccp_v6_protosw = { 1229static struct inet_protosw dccp_v6_protosw = {
@@ -1234,8 +1247,16 @@ static int __init dccp_v6_init(void)
1234 goto out_unregister_proto; 1247 goto out_unregister_proto;
1235 1248
1236 inet6_register_protosw(&dccp_v6_protosw); 1249 inet6_register_protosw(&dccp_v6_protosw);
1250
1251 err = inet_csk_ctl_sock_create(&dccp_v6_ctl_socket, PF_INET6,
1252 SOCK_DCCP, IPPROTO_DCCP);
1253 if (err != 0)
1254 goto out_unregister_protosw;
1237out: 1255out:
1238 return err; 1256 return err;
1257out_unregister_protosw:
1258 inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1259 inet6_unregister_protosw(&dccp_v6_protosw);
1239out_unregister_proto: 1260out_unregister_proto:
1240 proto_unregister(&dccp_v6_prot); 1261 proto_unregister(&dccp_v6_prot);
1241 goto out; 1262 goto out;
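For orientation, a minimal userspace sketch of the AF_INET6 DCCP socket that the dccp_v6_prot / inet6_dccp_ops pair above serves. The numeric fallbacks (SOCK_DCCP=6, IPPROTO_DCCP=33, SOL_DCCP=269, DCCP_SOCKOPT_SERVICE=2) and the service code are assumptions of this sketch, not something introduced by the patch.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef SOCK_DCCP
#define SOCK_DCCP            6
#endif
#ifndef IPPROTO_DCCP
#define IPPROTO_DCCP         33
#endif
#ifndef SOL_DCCP
#define SOL_DCCP             269
#endif
#ifndef DCCP_SOCKOPT_SERVICE
#define DCCP_SOCKOPT_SERVICE 2
#endif

int main(void)
{
        int fd = socket(AF_INET6, SOCK_DCCP, IPPROTO_DCCP);
        uint32_t service = htonl(42);                /* example service code */
        struct sockaddr_in6 sa = {
                .sin6_family = AF_INET6,
                .sin6_port   = htons(5001),
                .sin6_addr   = IN6ADDR_LOOPBACK_INIT,
        };

        /* the service code must be set before connect()/listen() */
        if (fd < 0 || setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
                                 &service, sizeof(service)) < 0)
                perror("dccp socket setup");
        else if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
                perror("connect");                   /* needs a listening DCCPv6 peer */

        if (fd >= 0)
                close(fd);
        return 0;
}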
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 29261fc198e7..c0349e5b0551 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -22,6 +22,7 @@
22#include "ackvec.h" 22#include "ackvec.h"
23#include "ccid.h" 23#include "ccid.h"
24#include "dccp.h" 24#include "dccp.h"
25#include "feat.h"
25 26
26struct inet_timewait_death_row dccp_death_row = { 27struct inet_timewait_death_row dccp_death_row = {
27 .sysctl_max_tw_buckets = NR_FILE * 2, 28 .sysctl_max_tw_buckets = NR_FILE * 2,
@@ -106,6 +107,7 @@ struct sock *dccp_create_openreq_child(struct sock *sk,
106 const struct dccp_request_sock *dreq = dccp_rsk(req); 107 const struct dccp_request_sock *dreq = dccp_rsk(req);
107 struct inet_connection_sock *newicsk = inet_csk(sk); 108 struct inet_connection_sock *newicsk = inet_csk(sk);
108 struct dccp_sock *newdp = dccp_sk(newsk); 109 struct dccp_sock *newdp = dccp_sk(newsk);
110 struct dccp_minisock *newdmsk = dccp_msk(newsk);
109 111
110 newdp->dccps_role = DCCP_ROLE_SERVER; 112 newdp->dccps_role = DCCP_ROLE_SERVER;
111 newdp->dccps_hc_rx_ackvec = NULL; 113 newdp->dccps_hc_rx_ackvec = NULL;
@@ -114,27 +116,27 @@ struct sock *dccp_create_openreq_child(struct sock *sk,
114 newicsk->icsk_rto = DCCP_TIMEOUT_INIT; 116 newicsk->icsk_rto = DCCP_TIMEOUT_INIT;
115 do_gettimeofday(&newdp->dccps_epoch); 117 do_gettimeofday(&newdp->dccps_epoch);
116 118
117 if (newdp->dccps_options.dccpo_send_ack_vector) { 119 if (dccp_feat_clone(sk, newsk))
120 goto out_free;
121
122 if (newdmsk->dccpms_send_ack_vector) {
118 newdp->dccps_hc_rx_ackvec = 123 newdp->dccps_hc_rx_ackvec =
119 dccp_ackvec_alloc(DCCP_MAX_ACKVEC_LEN, 124 dccp_ackvec_alloc(GFP_ATOMIC);
120 GFP_ATOMIC);
121 /*
122 * XXX: We're using the same CCIDs set on the parent,
123 * i.e. sk_clone copied the master sock and left the
124 * CCID pointers for this child, that is why we do the
125 * __ccid_get calls.
126 */
127 if (unlikely(newdp->dccps_hc_rx_ackvec == NULL)) 125 if (unlikely(newdp->dccps_hc_rx_ackvec == NULL))
128 goto out_free; 126 goto out_free;
129 } 127 }
130 128
131 if (unlikely(ccid_hc_rx_init(newdp->dccps_hc_rx_ccid, 129 newdp->dccps_hc_rx_ccid =
132 newsk) != 0 || 130 ccid_hc_rx_new(newdmsk->dccpms_rx_ccid,
133 ccid_hc_tx_init(newdp->dccps_hc_tx_ccid, 131 newsk, GFP_ATOMIC);
134 newsk) != 0)) { 132 newdp->dccps_hc_tx_ccid =
133 ccid_hc_tx_new(newdmsk->dccpms_tx_ccid,
134 newsk, GFP_ATOMIC);
135 if (unlikely(newdp->dccps_hc_rx_ccid == NULL ||
136 newdp->dccps_hc_tx_ccid == NULL)) {
135 dccp_ackvec_free(newdp->dccps_hc_rx_ackvec); 137 dccp_ackvec_free(newdp->dccps_hc_rx_ackvec);
136 ccid_hc_rx_exit(newdp->dccps_hc_rx_ccid, newsk); 138 ccid_hc_rx_delete(newdp->dccps_hc_rx_ccid, newsk);
137 ccid_hc_tx_exit(newdp->dccps_hc_tx_ccid, newsk); 139 ccid_hc_tx_delete(newdp->dccps_hc_tx_ccid, newsk);
138out_free: 140out_free:
139 /* It is still raw copy of parent, so invalidate 141 /* It is still raw copy of parent, so invalidate
140 * destructor and make plain sk_free() */ 142 * destructor and make plain sk_free() */
@@ -143,9 +145,6 @@ out_free:
143 return NULL; 145 return NULL;
144 } 146 }
145 147
146 __ccid_get(newdp->dccps_hc_rx_ccid);
147 __ccid_get(newdp->dccps_hc_tx_ccid);
148
149 /* 148 /*
150 * Step 3: Process LISTEN state 149 * Step 3: Process LISTEN state
151 * 150 *
@@ -155,7 +154,7 @@ out_free:
155 */ 154 */
156 155
157 /* See dccp_v4_conn_request */ 156 /* See dccp_v4_conn_request */
158 newdp->dccps_options.dccpo_sequence_window = req->rcv_wnd; 157 newdmsk->dccpms_sequence_window = req->rcv_wnd;
159 158
160 newdp->dccps_gar = newdp->dccps_isr = dreq->dreq_isr; 159 newdp->dccps_gar = newdp->dccps_isr = dreq->dreq_isr;
161 dccp_update_gsr(newsk, dreq->dreq_isr); 160 dccp_update_gsr(newsk, dreq->dreq_isr);
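The hunk above replaces the parent-shared __ccid_get() scheme with per-child allocations, so the error path must be able to tear down a partially built child. A plain standalone sketch of that allocate-or-unwind shape (illustrative names, not kernel APIs; like the kernel delete helpers, release() tolerates NULL):

#include <stdlib.h>

struct child_state { void *ackvec, *rx_ccid, *tx_ccid; };

/* like dccp_ackvec_free()/ccid_hc_*_delete(), a NULL argument is a no-op */
static void release(void *p) { free(p); }

static int build_child(struct child_state *c)
{
        c->ackvec  = malloc(16);     /* only if ack vectors were negotiated */
        c->rx_ccid = malloc(32);
        c->tx_ccid = malloc(32);

        if (c->ackvec == NULL || c->rx_ccid == NULL || c->tx_ccid == NULL) {
                release(c->tx_ccid); /* safe even for the members that failed */
                release(c->rx_ccid);
                release(c->ackvec);
                return -1;
        }
        return 0;
}

int main(void)
{
        struct child_state c;

        return build_child(&c);
}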
diff --git a/net/dccp/options.c b/net/dccp/options.c
index 0a76426c9aea..e9feb2a0c770 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -21,19 +21,23 @@
21#include "ackvec.h" 21#include "ackvec.h"
22#include "ccid.h" 22#include "ccid.h"
23#include "dccp.h" 23#include "dccp.h"
24#include "feat.h"
24 25
25/* stores the default values for new connection. may be changed with sysctl */ 26int dccp_feat_default_sequence_window = DCCPF_INITIAL_SEQUENCE_WINDOW;
26static const struct dccp_options dccpo_default_values = { 27int dccp_feat_default_rx_ccid = DCCPF_INITIAL_CCID;
27 .dccpo_sequence_window = DCCPF_INITIAL_SEQUENCE_WINDOW, 28int dccp_feat_default_tx_ccid = DCCPF_INITIAL_CCID;
28 .dccpo_rx_ccid = DCCPF_INITIAL_CCID, 29int dccp_feat_default_ack_ratio = DCCPF_INITIAL_ACK_RATIO;
29 .dccpo_tx_ccid = DCCPF_INITIAL_CCID, 30int dccp_feat_default_send_ack_vector = DCCPF_INITIAL_SEND_ACK_VECTOR;
30 .dccpo_send_ack_vector = DCCPF_INITIAL_SEND_ACK_VECTOR, 31int dccp_feat_default_send_ndp_count = DCCPF_INITIAL_SEND_NDP_COUNT;
31 .dccpo_send_ndp_count = DCCPF_INITIAL_SEND_NDP_COUNT,
32};
33 32
34void dccp_options_init(struct dccp_options *dccpo) 33void dccp_minisock_init(struct dccp_minisock *dmsk)
35{ 34{
36 memcpy(dccpo, &dccpo_default_values, sizeof(*dccpo)); 35 dmsk->dccpms_sequence_window = dccp_feat_default_sequence_window;
36 dmsk->dccpms_rx_ccid = dccp_feat_default_rx_ccid;
37 dmsk->dccpms_tx_ccid = dccp_feat_default_tx_ccid;
38 dmsk->dccpms_ack_ratio = dccp_feat_default_ack_ratio;
39 dmsk->dccpms_send_ack_vector = dccp_feat_default_send_ack_vector;
40 dmsk->dccpms_send_ndp_count = dccp_feat_default_send_ndp_count;
37} 41}
38 42
39static u32 dccp_decode_value_var(const unsigned char *bf, const u8 len) 43static u32 dccp_decode_value_var(const unsigned char *bf, const u8 len)
@@ -69,9 +73,12 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
69 unsigned char opt, len; 73 unsigned char opt, len;
70 unsigned char *value; 74 unsigned char *value;
71 u32 elapsed_time; 75 u32 elapsed_time;
76 int rc;
77 int mandatory = 0;
72 78
73 memset(opt_recv, 0, sizeof(*opt_recv)); 79 memset(opt_recv, 0, sizeof(*opt_recv));
74 80
81 opt = len = 0;
75 while (opt_ptr != opt_end) { 82 while (opt_ptr != opt_end) {
76 opt = *opt_ptr++; 83 opt = *opt_ptr++;
77 len = 0; 84 len = 0;
@@ -100,6 +107,12 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
100 switch (opt) { 107 switch (opt) {
101 case DCCPO_PADDING: 108 case DCCPO_PADDING:
102 break; 109 break;
110 case DCCPO_MANDATORY:
111 if (mandatory)
112 goto out_invalid_option;
113 if (pkt_type != DCCP_PKT_DATA)
114 mandatory = 1;
115 break;
103 case DCCPO_NDP_COUNT: 116 case DCCPO_NDP_COUNT:
104 if (len > 3) 117 if (len > 3)
105 goto out_invalid_option; 118 goto out_invalid_option;
@@ -108,12 +121,37 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
108 dccp_pr_debug("%sNDP count=%d\n", debug_prefix, 121 dccp_pr_debug("%sNDP count=%d\n", debug_prefix,
109 opt_recv->dccpor_ndp); 122 opt_recv->dccpor_ndp);
110 break; 123 break;
124 case DCCPO_CHANGE_L:
125 /* fall through */
126 case DCCPO_CHANGE_R:
127 if (len < 2)
128 goto out_invalid_option;
129 rc = dccp_feat_change_recv(sk, opt, *value, value + 1,
130 len - 1);
131 /*
132 * When there is a change error, change_recv is
133 * responsible for dealing with it. i.e. reply with an
134 * empty confirm.
135 * If the change was mandatory, then we need to die.
136 */
137 if (rc && mandatory)
138 goto out_invalid_option;
139 break;
140 case DCCPO_CONFIRM_L:
141 /* fall through */
142 case DCCPO_CONFIRM_R:
143 if (len < 2)
144 goto out_invalid_option;
145 if (dccp_feat_confirm_recv(sk, opt, *value,
146 value + 1, len - 1))
147 goto out_invalid_option;
148 break;
111 case DCCPO_ACK_VECTOR_0: 149 case DCCPO_ACK_VECTOR_0:
112 case DCCPO_ACK_VECTOR_1: 150 case DCCPO_ACK_VECTOR_1:
113 if (pkt_type == DCCP_PKT_DATA) 151 if (pkt_type == DCCP_PKT_DATA)
114 continue; 152 break;
115 153
116 if (dp->dccps_options.dccpo_send_ack_vector && 154 if (dccp_msk(sk)->dccpms_send_ack_vector &&
117 dccp_ackvec_parse(sk, skb, opt, value, len)) 155 dccp_ackvec_parse(sk, skb, opt, value, len))
118 goto out_invalid_option; 156 goto out_invalid_option;
119 break; 157 break;
@@ -121,7 +159,7 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
121 if (len != 4) 159 if (len != 4)
122 goto out_invalid_option; 160 goto out_invalid_option;
123 161
124 opt_recv->dccpor_timestamp = ntohl(*(u32 *)value); 162 opt_recv->dccpor_timestamp = ntohl(*(__be32 *)value);
125 163
126 dp->dccps_timestamp_echo = opt_recv->dccpor_timestamp; 164 dp->dccps_timestamp_echo = opt_recv->dccpor_timestamp;
127 dccp_timestamp(sk, &dp->dccps_timestamp_time); 165 dccp_timestamp(sk, &dp->dccps_timestamp_time);
@@ -135,7 +173,7 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
135 if (len != 4 && len != 6 && len != 8) 173 if (len != 4 && len != 6 && len != 8)
136 goto out_invalid_option; 174 goto out_invalid_option;
137 175
138 opt_recv->dccpor_timestamp_echo = ntohl(*(u32 *)value); 176 opt_recv->dccpor_timestamp_echo = ntohl(*(__be32 *)value);
139 177
140 dccp_pr_debug("%sTIMESTAMP_ECHO=%u, len=%d, ackno=%llu, ", 178 dccp_pr_debug("%sTIMESTAMP_ECHO=%u, len=%d, ackno=%llu, ",
141 debug_prefix, 179 debug_prefix,
@@ -149,9 +187,9 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
149 break; 187 break;
150 188
151 if (len == 6) 189 if (len == 6)
152 elapsed_time = ntohs(*(u16 *)(value + 4)); 190 elapsed_time = ntohs(*(__be16 *)(value + 4));
153 else 191 else
154 elapsed_time = ntohl(*(u32 *)(value + 4)); 192 elapsed_time = ntohl(*(__be32 *)(value + 4));
155 193
156 /* Give precedence to the biggest ELAPSED_TIME */ 194 /* Give precedence to the biggest ELAPSED_TIME */
157 if (elapsed_time > opt_recv->dccpor_elapsed_time) 195 if (elapsed_time > opt_recv->dccpor_elapsed_time)
@@ -165,9 +203,9 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
165 continue; 203 continue;
166 204
167 if (len == 2) 205 if (len == 2)
168 elapsed_time = ntohs(*(u16 *)value); 206 elapsed_time = ntohs(*(__be16 *)value);
169 else 207 else
170 elapsed_time = ntohl(*(u32 *)value); 208 elapsed_time = ntohl(*(__be32 *)value);
171 209
172 if (elapsed_time > opt_recv->dccpor_elapsed_time) 210 if (elapsed_time > opt_recv->dccpor_elapsed_time)
173 opt_recv->dccpor_elapsed_time = elapsed_time; 211 opt_recv->dccpor_elapsed_time = elapsed_time;
@@ -208,8 +246,15 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
208 sk, opt, len); 246 sk, opt, len);
209 break; 247 break;
210 } 248 }
249
250 if (opt != DCCPO_MANDATORY)
251 mandatory = 0;
211 } 252 }
212 253
254 /* mandatory was the last byte in the option list -> reset connection */
255 if (mandatory)
256 goto out_invalid_option;
257
213 return 0; 258 return 0;
214 259
215out_invalid_option: 260out_invalid_option:
@@ -219,6 +264,8 @@ out_invalid_option:
219 return -1; 264 return -1;
220} 265}
221 266
267EXPORT_SYMBOL_GPL(dccp_parse_options);
268
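A self-contained sketch of the option-walk semantics the hunks above implement: Mandatory (option 1) arms a flag that applies only to the immediately following option, a Change that cannot be honoured while the flag is set invalidates the packet, and a Mandatory that ends the option list is itself invalid. Per RFC 4340, option types below 32 are single bytes with no length field; handle_option() below is a stand-in for the per-option switch.

#include <stdint.h>

static int handle_option(uint8_t opt, const uint8_t *value, uint8_t len)
{
        (void)opt; (void)value; (void)len;
        return 0;        /* stand-in for the Change/Confirm/Ack Vector handlers */
}

/* Returns 0 on success, -1 if the option block is malformed or a
 * mandatory option could not be honoured (-> reset the connection). */
static int parse_options(const uint8_t *p, const uint8_t *end, int is_data_pkt)
{
        int mandatory = 0;

        while (p != end) {
                uint8_t opt = *p++, len = 0;
                const uint8_t *value = NULL;

                if (opt > 31) {                 /* multi-byte option: type, len, value */
                        if (p == end || (len = *p++) < 2)
                                return -1;
                        value = p;
                        len -= 2;
                        if (end - p < len)
                                return -1;
                        p += len;
                }

                switch (opt) {
                case 0:                         /* Padding */
                        break;
                case 1:                         /* Mandatory */
                        if (mandatory)          /* Mandatory followed by Mandatory is invalid */
                                return -1;
                        if (!is_data_pkt)
                                mandatory = 1;
                        break;
                default:
                        if (handle_option(opt, value, len) && mandatory)
                                return -1;      /* could not honour a mandatory option */
                }

                if (opt != 1)                   /* the flag covers one option only */
                        mandatory = 0;
        }
        return mandatory ? -1 : 0;              /* trailing Mandatory is invalid */
}

int main(void)
{
        /* Padding, Mandatory, then NDP Count (option 37), length 3, value 5 */
        const uint8_t opts[] = { 0, 1, 37, 3, 5 };

        return parse_options(opts, opts + sizeof(opts), 0);
}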
222static void dccp_encode_value_var(const u32 value, unsigned char *to, 269static void dccp_encode_value_var(const u32 value, unsigned char *to,
223 const unsigned int len) 270 const unsigned int len)
224{ 271{
@@ -237,17 +284,14 @@ static inline int dccp_ndp_len(const int ndp)
237 return likely(ndp <= 0xFF) ? 1 : ndp <= 0xFFFF ? 2 : 3; 284 return likely(ndp <= 0xFF) ? 1 : ndp <= 0xFFFF ? 2 : 3;
238} 285}
239 286
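dccp_encode_value_var()/dccp_decode_value_var() (bodies outside this hunk) store small counters such as the NDP count as one to three big-endian bytes, which is why dccp_ndp_len() above picks 1, 2 or 3. A standalone round-trip sketch of that encoding, written to match the behaviour the callers rely on:

#include <assert.h>
#include <stdint.h>

/* write the low 'len' bytes of 'value' in network (big-endian) order */
static void encode_value_var(uint32_t value, unsigned char *to, unsigned int len)
{
        if (len > 2)
                *to++ = (value & 0xFF0000) >> 16;
        if (len > 1)
                *to++ = (value & 0xFF00) >> 8;
        if (len > 0)
                *to++ = value & 0xFF;
}

static uint32_t decode_value_var(const unsigned char *bf, uint8_t len)
{
        uint32_t value = 0;

        while (len--)
                value = (value << 8) | *bf++;
        return value;
}

static unsigned int ndp_len(uint32_t ndp)
{
        return ndp <= 0xFF ? 1 : ndp <= 0xFFFF ? 2 : 3;
}

int main(void)
{
        unsigned char buf[3];
        uint32_t ndp = 0x1234;          /* needs two bytes */

        assert(ndp_len(ndp) == 2);
        encode_value_var(ndp, buf, ndp_len(ndp));
        assert(decode_value_var(buf, 2) == ndp);
        return 0;
}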
240void dccp_insert_option(struct sock *sk, struct sk_buff *skb, 287int dccp_insert_option(struct sock *sk, struct sk_buff *skb,
241 const unsigned char option, 288 const unsigned char option,
242 const void *value, const unsigned char len) 289 const void *value, const unsigned char len)
243{ 290{
244 unsigned char *to; 291 unsigned char *to;
245 292
246 if (DCCP_SKB_CB(skb)->dccpd_opt_len + len + 2 > DCCP_MAX_OPT_LEN) { 293 if (DCCP_SKB_CB(skb)->dccpd_opt_len + len + 2 > DCCP_MAX_OPT_LEN)
247 LIMIT_NETDEBUG(KERN_INFO "DCCP: packet too small to insert " 294 return -1;
248 "%d option!\n", option);
249 return;
250 }
251 295
252 DCCP_SKB_CB(skb)->dccpd_opt_len += len + 2; 296 DCCP_SKB_CB(skb)->dccpd_opt_len += len + 2;
253 297
@@ -256,11 +300,12 @@ void dccp_insert_option(struct sock *sk, struct sk_buff *skb,
256 *to++ = len + 2; 300 *to++ = len + 2;
257 301
258 memcpy(to, value, len); 302 memcpy(to, value, len);
303 return 0;
259} 304}
260 305
261EXPORT_SYMBOL_GPL(dccp_insert_option); 306EXPORT_SYMBOL_GPL(dccp_insert_option);
262 307
263static void dccp_insert_option_ndp(struct sock *sk, struct sk_buff *skb) 308static int dccp_insert_option_ndp(struct sock *sk, struct sk_buff *skb)
264{ 309{
265 struct dccp_sock *dp = dccp_sk(sk); 310 struct dccp_sock *dp = dccp_sk(sk);
266 int ndp = dp->dccps_ndp_count; 311 int ndp = dp->dccps_ndp_count;
@@ -276,7 +321,7 @@ static void dccp_insert_option_ndp(struct sock *sk, struct sk_buff *skb)
276 const int len = ndp_len + 2; 321 const int len = ndp_len + 2;
277 322
278 if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN) 323 if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN)
279 return; 324 return -1;
280 325
281 DCCP_SKB_CB(skb)->dccpd_opt_len += len; 326 DCCP_SKB_CB(skb)->dccpd_opt_len += len;
282 327
@@ -285,6 +330,8 @@ static void dccp_insert_option_ndp(struct sock *sk, struct sk_buff *skb)
285 *ptr++ = len; 330 *ptr++ = len;
286 dccp_encode_value_var(ndp, ptr, ndp_len); 331 dccp_encode_value_var(ndp, ptr, ndp_len);
287 } 332 }
333
334 return 0;
288} 335}
289 336
290static inline int dccp_elapsed_time_len(const u32 elapsed_time) 337static inline int dccp_elapsed_time_len(const u32 elapsed_time)
@@ -292,27 +339,18 @@ static inline int dccp_elapsed_time_len(const u32 elapsed_time)
292 return elapsed_time == 0 ? 0 : elapsed_time <= 0xFFFF ? 2 : 4; 339 return elapsed_time == 0 ? 0 : elapsed_time <= 0xFFFF ? 2 : 4;
293} 340}
294 341
295void dccp_insert_option_elapsed_time(struct sock *sk, 342int dccp_insert_option_elapsed_time(struct sock *sk, struct sk_buff *skb,
296 struct sk_buff *skb, 343 u32 elapsed_time)
297 u32 elapsed_time)
298{ 344{
299#ifdef CONFIG_IP_DCCP_DEBUG
300 struct dccp_sock *dp = dccp_sk(sk);
301 const char *debug_prefix = dp->dccps_role == DCCP_ROLE_CLIENT ?
302 "CLIENT TX opt: " : "server TX opt: ";
303#endif
304 const int elapsed_time_len = dccp_elapsed_time_len(elapsed_time); 345 const int elapsed_time_len = dccp_elapsed_time_len(elapsed_time);
305 const int len = 2 + elapsed_time_len; 346 const int len = 2 + elapsed_time_len;
306 unsigned char *to; 347 unsigned char *to;
307 348
308 if (elapsed_time_len == 0) 349 if (elapsed_time_len == 0)
309 return; 350 return 0;
310 351
311 if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN) { 352 if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN)
312 LIMIT_NETDEBUG(KERN_INFO "DCCP: packet too small to " 353 return -1;
313 "insert elapsed time!\n");
314 return;
315 }
316 354
317 DCCP_SKB_CB(skb)->dccpd_opt_len += len; 355 DCCP_SKB_CB(skb)->dccpd_opt_len += len;
318 356
@@ -321,17 +359,14 @@ void dccp_insert_option_elapsed_time(struct sock *sk,
321 *to++ = len; 359 *to++ = len;
322 360
323 if (elapsed_time_len == 2) { 361 if (elapsed_time_len == 2) {
324 const u16 var16 = htons((u16)elapsed_time); 362 const __be16 var16 = htons((u16)elapsed_time);
325 memcpy(to, &var16, 2); 363 memcpy(to, &var16, 2);
326 } else { 364 } else {
327 const u32 var32 = htonl(elapsed_time); 365 const __be32 var32 = htonl(elapsed_time);
328 memcpy(to, &var32, 4); 366 memcpy(to, &var32, 4);
329 } 367 }
330 368
331 dccp_pr_debug("%sELAPSED_TIME=%u, len=%d, seqno=%llu\n", 369 return 0;
332 debug_prefix, elapsed_time,
333 len,
334 (unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq);
335} 370}
336 371
337EXPORT_SYMBOL_GPL(dccp_insert_option_elapsed_time); 372EXPORT_SYMBOL_GPL(dccp_insert_option_elapsed_time);
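The Elapsed Time option above is carried in the 10-microsecond units DCCP timestamps use (RFC 4340) and occupies 0, 2 or 4 bytes depending on magnitude; dccp_elapsed_time_len() picks the size. A small sketch of that choice:

#include <stdio.h>
#include <stdint.h>

/* 0 bytes when nothing elapsed, 2 bytes up to 0xFFFF, else 4 bytes */
static int elapsed_time_len(uint32_t elapsed_time)
{
        return elapsed_time == 0 ? 0 : elapsed_time <= 0xFFFF ? 2 : 4;
}

int main(void)
{
        printf("%d %d %d\n",
               elapsed_time_len(0),             /* 0 */
               elapsed_time_len(65535),         /* 2: 65535 * 10us = 655.35 ms */
               elapsed_time_len(65536));        /* 4: anything longer needs four bytes */
        return 0;
}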
@@ -352,32 +387,27 @@ void dccp_timestamp(const struct sock *sk, struct timeval *tv)
352 387
353EXPORT_SYMBOL_GPL(dccp_timestamp); 388EXPORT_SYMBOL_GPL(dccp_timestamp);
354 389
355void dccp_insert_option_timestamp(struct sock *sk, struct sk_buff *skb) 390int dccp_insert_option_timestamp(struct sock *sk, struct sk_buff *skb)
356{ 391{
357 struct timeval tv; 392 struct timeval tv;
358 u32 now; 393 __be32 now;
359 394
360 dccp_timestamp(sk, &tv); 395 dccp_timestamp(sk, &tv);
361 now = timeval_usecs(&tv) / 10; 396 now = htonl(timeval_usecs(&tv) / 10);
362 /* yes this will overflow but that is the point as we want a 397 /* yes this will overflow but that is the point as we want a
363 * 10 usec 32 bit timer which mean it wraps every 11.9 hours */ 398 * 10 usec 32 bit timer which mean it wraps every 11.9 hours */
364 399
365 now = htonl(now); 400 return dccp_insert_option(sk, skb, DCCPO_TIMESTAMP, &now, sizeof(now));
366 dccp_insert_option(sk, skb, DCCPO_TIMESTAMP, &now, sizeof(now));
367} 401}
368 402
369EXPORT_SYMBOL_GPL(dccp_insert_option_timestamp); 403EXPORT_SYMBOL_GPL(dccp_insert_option_timestamp);
370 404
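The wrap interval quoted in the comment above follows directly from the 10 microsecond tick: 2^32 ticks cover about 42950 seconds, i.e. roughly 11.9 hours. A one-line check:

#include <stdio.h>

int main(void)
{
        /* 2^32 ticks of 10 microseconds, expressed in hours */
        double hours = 4294967296.0 * 10e-6 / 3600.0;

        printf("timestamp wraps every %.2f hours\n", hours);   /* ~11.93 */
        return 0;
}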
371static void dccp_insert_option_timestamp_echo(struct sock *sk, 405static int dccp_insert_option_timestamp_echo(struct sock *sk,
372 struct sk_buff *skb) 406 struct sk_buff *skb)
373{ 407{
374 struct dccp_sock *dp = dccp_sk(sk); 408 struct dccp_sock *dp = dccp_sk(sk);
375#ifdef CONFIG_IP_DCCP_DEBUG
376 const char *debug_prefix = dp->dccps_role == DCCP_ROLE_CLIENT ?
377 "CLIENT TX opt: " : "server TX opt: ";
378#endif
379 struct timeval now; 409 struct timeval now;
380 u32 tstamp_echo; 410 __be32 tstamp_echo;
381 u32 elapsed_time; 411 u32 elapsed_time;
382 int len, elapsed_time_len; 412 int len, elapsed_time_len;
383 unsigned char *to; 413 unsigned char *to;
@@ -387,11 +417,8 @@ static void dccp_insert_option_timestamp_echo(struct sock *sk,
387 elapsed_time_len = dccp_elapsed_time_len(elapsed_time); 417 elapsed_time_len = dccp_elapsed_time_len(elapsed_time);
388 len = 6 + elapsed_time_len; 418 len = 6 + elapsed_time_len;
389 419
390 if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN) { 420 if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN)
391 LIMIT_NETDEBUG(KERN_INFO "DCCP: packet too small to insert " 421 return -1;
392 "timestamp echo!\n");
393 return;
394 }
395 422
396 DCCP_SKB_CB(skb)->dccpd_opt_len += len; 423 DCCP_SKB_CB(skb)->dccpd_opt_len += len;
397 424
@@ -402,51 +429,149 @@ static void dccp_insert_option_timestamp_echo(struct sock *sk,
402 tstamp_echo = htonl(dp->dccps_timestamp_echo); 429 tstamp_echo = htonl(dp->dccps_timestamp_echo);
403 memcpy(to, &tstamp_echo, 4); 430 memcpy(to, &tstamp_echo, 4);
404 to += 4; 431 to += 4;
405 432
406 if (elapsed_time_len == 2) { 433 if (elapsed_time_len == 2) {
407 const u16 var16 = htons((u16)elapsed_time); 434 const __be16 var16 = htons((u16)elapsed_time);
408 memcpy(to, &var16, 2); 435 memcpy(to, &var16, 2);
409 } else if (elapsed_time_len == 4) { 436 } else if (elapsed_time_len == 4) {
410 const u32 var32 = htonl(elapsed_time); 437 const __be32 var32 = htonl(elapsed_time);
411 memcpy(to, &var32, 4); 438 memcpy(to, &var32, 4);
412 } 439 }
413 440
414 dccp_pr_debug("%sTIMESTAMP_ECHO=%u, len=%d, seqno=%llu\n",
415 debug_prefix, dp->dccps_timestamp_echo,
416 len,
417 (unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq);
418
419 dp->dccps_timestamp_echo = 0; 441 dp->dccps_timestamp_echo = 0;
420 dp->dccps_timestamp_time.tv_sec = 0; 442 dp->dccps_timestamp_time.tv_sec = 0;
421 dp->dccps_timestamp_time.tv_usec = 0; 443 dp->dccps_timestamp_time.tv_usec = 0;
444 return 0;
422} 445}
423 446
424void dccp_insert_options(struct sock *sk, struct sk_buff *skb) 447static int dccp_insert_feat_opt(struct sk_buff *skb, u8 type, u8 feat,
448 u8 *val, u8 len)
449{
450 u8 *to;
451
452 if (DCCP_SKB_CB(skb)->dccpd_opt_len + len + 3 > DCCP_MAX_OPT_LEN) {
453 LIMIT_NETDEBUG(KERN_INFO "DCCP: packet too small"
454 " to insert feature %d option!\n", feat);
455 return -1;
456 }
457
458 DCCP_SKB_CB(skb)->dccpd_opt_len += len + 3;
459
460 to = skb_push(skb, len + 3);
461 *to++ = type;
462 *to++ = len + 3;
463 *to++ = feat;
464
465 if (len)
466 memcpy(to, val, len);
467 dccp_pr_debug("option %d feat %d len %d\n", type, feat, len);
468
469 return 0;
470}
471
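dccp_insert_feat_opt() above writes a Change/Confirm option as type, total length (value length + 3), feature number, then the value or preference list. A standalone sketch laying out the same bytes into a flat buffer (the skb_push() bookkeeping is left out; the option and feature numbers used in main() are assumptions taken from <linux/dccp.h>, not defined by this hunk):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Lay out one Change/Confirm option: | type | len | feature | value... |
 * where len counts the whole option (value length + 3).  Returns the
 * number of bytes written, or -1 if it would not fit. */
static int put_feat_opt(uint8_t *buf, size_t room, uint8_t type,
                        uint8_t feat, const uint8_t *val, uint8_t len)
{
        if (room < (size_t)len + 3)
                return -1;

        buf[0] = type;
        buf[1] = len + 3;
        buf[2] = feat;
        if (len)
                memcpy(buf + 3, val, len);
        return len + 3;
}

int main(void)
{
        uint8_t buf[8];
        const uint8_t ccid_pref[] = { 2, 3 };   /* example preference list: CCID 2, then CCID 3 */

        /* 32 is assumed to be DCCPO_CHANGE_L and 1 the CCID feature number */
        int n = put_feat_opt(buf, sizeof(buf), 32, 1, ccid_pref, sizeof(ccid_pref));

        assert(n == 5 && buf[1] == 5);
        return 0;
}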
472static int dccp_insert_options_feat(struct sock *sk, struct sk_buff *skb)
425{ 473{
426 struct dccp_sock *dp = dccp_sk(sk); 474 struct dccp_sock *dp = dccp_sk(sk);
475 struct dccp_minisock *dmsk = dccp_msk(sk);
476 struct dccp_opt_pend *opt, *next;
477 int change = 0;
478
479 /* confirm any options [NN opts] */
480 list_for_each_entry_safe(opt, next, &dmsk->dccpms_conf, dccpop_node) {
481 dccp_insert_feat_opt(skb, opt->dccpop_type,
482 opt->dccpop_feat, opt->dccpop_val,
483 opt->dccpop_len);
484 /* fear empty confirms */
485 if (opt->dccpop_val)
486 kfree(opt->dccpop_val);
487 kfree(opt);
488 }
489 INIT_LIST_HEAD(&dmsk->dccpms_conf);
490
491 /* see which features we need to send */
492 list_for_each_entry(opt, &dmsk->dccpms_pending, dccpop_node) {
493 /* see if we need to send any confirm */
494 if (opt->dccpop_sc) {
495 dccp_insert_feat_opt(skb, opt->dccpop_type + 1,
496 opt->dccpop_feat,
497 opt->dccpop_sc->dccpoc_val,
498 opt->dccpop_sc->dccpoc_len);
499
500 BUG_ON(!opt->dccpop_sc->dccpoc_val);
501 kfree(opt->dccpop_sc->dccpoc_val);
502 kfree(opt->dccpop_sc);
503 opt->dccpop_sc = NULL;
504 }
505
506 /* any option not confirmed, re-send it */
507 if (!opt->dccpop_conf) {
508 dccp_insert_feat_opt(skb, opt->dccpop_type,
509 opt->dccpop_feat, opt->dccpop_val,
510 opt->dccpop_len);
511 change++;
512 }
513 }
514
515 /* Retransmit timer.
516 * If this is the master listening sock, we don't set a timer on it. It
517 * should be fine because if the dude doesn't receive our RESPONSE
518 * [which will contain the CHANGE] he will send another REQUEST which
519 * will "retransmit" the change.
520 */
521 if (change && dp->dccps_role != DCCP_ROLE_LISTEN) {
522 dccp_pr_debug("reset feat negotiation timer %p\n", sk);
523
524 /* XXX don't reset the timer on re-transmissions. I.e. reset it
525 * only when sending new stuff, I guess. Currently the timer
526 * never backs off because on re-transmission it just resets it!
527 */
528 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
529 inet_csk(sk)->icsk_rto, DCCP_RTO_MAX);
530 }
531
532 return 0;
533}
534
535int dccp_insert_options(struct sock *sk, struct sk_buff *skb)
536{
537 struct dccp_sock *dp = dccp_sk(sk);
538 struct dccp_minisock *dmsk = dccp_msk(sk);
427 539
428 DCCP_SKB_CB(skb)->dccpd_opt_len = 0; 540 DCCP_SKB_CB(skb)->dccpd_opt_len = 0;
429 541
430 if (dp->dccps_options.dccpo_send_ndp_count) 542 if (dmsk->dccpms_send_ndp_count &&
431 dccp_insert_option_ndp(sk, skb); 543 dccp_insert_option_ndp(sk, skb))
544 return -1;
432 545
433 if (!dccp_packet_without_ack(skb)) { 546 if (!dccp_packet_without_ack(skb)) {
434 if (dp->dccps_options.dccpo_send_ack_vector && 547 if (dmsk->dccpms_send_ack_vector &&
435 dccp_ackvec_pending(dp->dccps_hc_rx_ackvec)) 548 dccp_ackvec_pending(dp->dccps_hc_rx_ackvec) &&
436 dccp_insert_option_ackvec(sk, skb); 549 dccp_insert_option_ackvec(sk, skb))
437 if (dp->dccps_timestamp_echo != 0) 550 return -1;
438 dccp_insert_option_timestamp_echo(sk, skb); 551
552 if (dp->dccps_timestamp_echo != 0 &&
553 dccp_insert_option_timestamp_echo(sk, skb))
554 return -1;
439 } 555 }
440 556
441 if (dp->dccps_hc_rx_insert_options) { 557 if (dp->dccps_hc_rx_insert_options) {
442 ccid_hc_rx_insert_options(dp->dccps_hc_rx_ccid, sk, skb); 558 if (ccid_hc_rx_insert_options(dp->dccps_hc_rx_ccid, sk, skb))
559 return -1;
443 dp->dccps_hc_rx_insert_options = 0; 560 dp->dccps_hc_rx_insert_options = 0;
444 } 561 }
445 if (dp->dccps_hc_tx_insert_options) { 562 if (dp->dccps_hc_tx_insert_options) {
446 ccid_hc_tx_insert_options(dp->dccps_hc_tx_ccid, sk, skb); 563 if (ccid_hc_tx_insert_options(dp->dccps_hc_tx_ccid, sk, skb))
564 return -1;
447 dp->dccps_hc_tx_insert_options = 0; 565 dp->dccps_hc_tx_insert_options = 0;
448 } 566 }
449 567
568 /* Feature negotiation */
569 /* Data packets can't do feat negotiation */
570 if (DCCP_SKB_CB(skb)->dccpd_type != DCCP_PKT_DATA &&
571 DCCP_SKB_CB(skb)->dccpd_type != DCCP_PKT_DATAACK &&
572 dccp_insert_options_feat(sk, skb))
573 return -1;
574
450 /* XXX: insert other options when appropriate */ 575 /* XXX: insert other options when appropriate */
451 576
452 if (DCCP_SKB_CB(skb)->dccpd_opt_len != 0) { 577 if (DCCP_SKB_CB(skb)->dccpd_opt_len != 0) {
@@ -459,4 +584,6 @@ void dccp_insert_options(struct sock *sk, struct sk_buff *skb)
459 DCCP_SKB_CB(skb)->dccpd_opt_len += padding; 584 DCCP_SKB_CB(skb)->dccpd_opt_len += padding;
460 } 585 }
461 } 586 }
587
588 return 0;
462} 589}
diff --git a/net/dccp/output.c b/net/dccp/output.c
index efd7ffb903a1..7409e4a3abdf 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -27,7 +27,7 @@ static inline void dccp_event_ack_sent(struct sock *sk)
27 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); 27 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
28} 28}
29 29
30static inline void dccp_skb_entail(struct sock *sk, struct sk_buff *skb) 30static void dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
31{ 31{
32 skb_set_owner_w(skb, sk); 32 skb_set_owner_w(skb, sk);
33 WARN_ON(sk->sk_send_head); 33 WARN_ON(sk->sk_send_head);
@@ -49,7 +49,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
49 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); 49 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
50 struct dccp_hdr *dh; 50 struct dccp_hdr *dh;
51 /* XXX For now we're using only 48 bits sequence numbers */ 51 /* XXX For now we're using only 48 bits sequence numbers */
52 const int dccp_header_size = sizeof(*dh) + 52 const u32 dccp_header_size = sizeof(*dh) +
53 sizeof(struct dccp_hdr_ext) + 53 sizeof(struct dccp_hdr_ext) +
54 dccp_packet_hdr_len(dcb->dccpd_type); 54 dccp_packet_hdr_len(dcb->dccpd_type);
55 int err, set_ack = 1; 55 int err, set_ack = 1;
@@ -64,6 +64,10 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
64 case DCCP_PKT_DATAACK: 64 case DCCP_PKT_DATAACK:
65 break; 65 break;
66 66
67 case DCCP_PKT_REQUEST:
68 set_ack = 0;
69 /* fall through */
70
67 case DCCP_PKT_SYNC: 71 case DCCP_PKT_SYNC:
68 case DCCP_PKT_SYNCACK: 72 case DCCP_PKT_SYNCACK:
69 ackno = dcb->dccpd_seq; 73 ackno = dcb->dccpd_seq;
@@ -79,7 +83,11 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
79 } 83 }
80 84
81 dcb->dccpd_seq = dp->dccps_gss; 85 dcb->dccpd_seq = dp->dccps_gss;
82 dccp_insert_options(sk, skb); 86
87 if (dccp_insert_options(sk, skb)) {
88 kfree_skb(skb);
89 return -EPROTO;
90 }
83 91
84 skb->h.raw = skb_push(skb, dccp_header_size); 92 skb->h.raw = skb_push(skb, dccp_header_size);
85 dh = dccp_hdr(skb); 93 dh = dccp_hdr(skb);
@@ -275,17 +283,16 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
275{ 283{
276 struct dccp_hdr *dh; 284 struct dccp_hdr *dh;
277 struct dccp_request_sock *dreq; 285 struct dccp_request_sock *dreq;
278 const int dccp_header_size = sizeof(struct dccp_hdr) + 286 const u32 dccp_header_size = sizeof(struct dccp_hdr) +
279 sizeof(struct dccp_hdr_ext) + 287 sizeof(struct dccp_hdr_ext) +
280 sizeof(struct dccp_hdr_response); 288 sizeof(struct dccp_hdr_response);
281 struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN + 289 struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
282 dccp_header_size, 1,
283 GFP_ATOMIC); 290 GFP_ATOMIC);
284 if (skb == NULL) 291 if (skb == NULL)
285 return NULL; 292 return NULL;
286 293
287 /* Reserve space for headers. */ 294 /* Reserve space for headers. */
288 skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size); 295 skb_reserve(skb, sk->sk_prot->max_header);
289 296
290 skb->dst = dst_clone(dst); 297 skb->dst = dst_clone(dst);
291 skb->csum = 0; 298 skb->csum = 0;
@@ -293,7 +300,11 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
293 dreq = dccp_rsk(req); 300 dreq = dccp_rsk(req);
294 DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE; 301 DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
295 DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_iss; 302 DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_iss;
296 dccp_insert_options(sk, skb); 303
304 if (dccp_insert_options(sk, skb)) {
305 kfree_skb(skb);
306 return NULL;
307 }
297 308
298 skb->h.raw = skb_push(skb, dccp_header_size); 309 skb->h.raw = skb_push(skb, dccp_header_size);
299 310
@@ -310,32 +321,28 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
310 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr); 321 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
311 dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service; 322 dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;
312 323
313 dh->dccph_checksum = dccp_v4_checksum(skb, inet_rsk(req)->loc_addr,
314 inet_rsk(req)->rmt_addr);
315
316 DCCP_INC_STATS(DCCP_MIB_OUTSEGS); 324 DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
317 return skb; 325 return skb;
318} 326}
319 327
320EXPORT_SYMBOL_GPL(dccp_make_response); 328EXPORT_SYMBOL_GPL(dccp_make_response);
321 329
322struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst, 330static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
323 const enum dccp_reset_codes code) 331 const enum dccp_reset_codes code)
324 332
325{ 333{
326 struct dccp_hdr *dh; 334 struct dccp_hdr *dh;
327 struct dccp_sock *dp = dccp_sk(sk); 335 struct dccp_sock *dp = dccp_sk(sk);
328 const int dccp_header_size = sizeof(struct dccp_hdr) + 336 const u32 dccp_header_size = sizeof(struct dccp_hdr) +
329 sizeof(struct dccp_hdr_ext) + 337 sizeof(struct dccp_hdr_ext) +
330 sizeof(struct dccp_hdr_reset); 338 sizeof(struct dccp_hdr_reset);
331 struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN + 339 struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
332 dccp_header_size, 1,
333 GFP_ATOMIC); 340 GFP_ATOMIC);
334 if (skb == NULL) 341 if (skb == NULL)
335 return NULL; 342 return NULL;
336 343
337 /* Reserve space for headers. */ 344 /* Reserve space for headers. */
338 skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size); 345 skb_reserve(skb, sk->sk_prot->max_header);
339 346
340 skb->dst = dst_clone(dst); 347 skb->dst = dst_clone(dst);
341 skb->csum = 0; 348 skb->csum = 0;
@@ -345,7 +352,11 @@ struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
345 DCCP_SKB_CB(skb)->dccpd_reset_code = code; 352 DCCP_SKB_CB(skb)->dccpd_reset_code = code;
346 DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESET; 353 DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESET;
347 DCCP_SKB_CB(skb)->dccpd_seq = dp->dccps_gss; 354 DCCP_SKB_CB(skb)->dccpd_seq = dp->dccps_gss;
348 dccp_insert_options(sk, skb); 355
356 if (dccp_insert_options(sk, skb)) {
357 kfree_skb(skb);
358 return NULL;
359 }
349 360
350 skb->h.raw = skb_push(skb, dccp_header_size); 361 skb->h.raw = skb_push(skb, dccp_header_size);
351 362
@@ -362,14 +373,34 @@ struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
362 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr); 373 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr);
363 374
364 dccp_hdr_reset(skb)->dccph_reset_code = code; 375 dccp_hdr_reset(skb)->dccph_reset_code = code;
365 376 inet_csk(sk)->icsk_af_ops->send_check(sk, skb->len, skb);
366 dh->dccph_checksum = dccp_v4_checksum(skb, inet_sk(sk)->saddr,
367 inet_sk(sk)->daddr);
368 377
369 DCCP_INC_STATS(DCCP_MIB_OUTSEGS); 378 DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
370 return skb; 379 return skb;
371} 380}
372 381
382int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
383{
384 /*
385 * FIXME: what if rebuild_header fails?
386 * Should we be doing a rebuild_header here?
387 */
388 int err = inet_sk_rebuild_header(sk);
389
390 if (err == 0) {
391 struct sk_buff *skb = dccp_make_reset(sk, sk->sk_dst_cache,
392 code);
393 if (skb != NULL) {
394 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
395 err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, 0);
396 if (err == NET_XMIT_CN)
397 err = 0;
398 }
399 }
400
401 return err;
402}
403
373/* 404/*
374 * Do all connect socket setups that can be done AF independent. 405 * Do all connect socket setups that can be done AF independent.
375 */ 406 */
@@ -405,12 +436,12 @@ int dccp_connect(struct sock *sk)
405 436
406 dccp_connect_init(sk); 437 dccp_connect_init(sk);
407 438
408 skb = alloc_skb(MAX_DCCP_HEADER + 15, sk->sk_allocation); 439 skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
409 if (unlikely(skb == NULL)) 440 if (unlikely(skb == NULL))
410 return -ENOBUFS; 441 return -ENOBUFS;
411 442
412 /* Reserve space for headers. */ 443 /* Reserve space for headers. */
413 skb_reserve(skb, MAX_DCCP_HEADER); 444 skb_reserve(skb, sk->sk_prot->max_header);
414 445
415 DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST; 446 DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;
416 skb->csum = 0; 447 skb->csum = 0;
@@ -431,7 +462,8 @@ void dccp_send_ack(struct sock *sk)
431{ 462{
432 /* If we have been reset, we may not send again. */ 463 /* If we have been reset, we may not send again. */
433 if (sk->sk_state != DCCP_CLOSED) { 464 if (sk->sk_state != DCCP_CLOSED) {
434 struct sk_buff *skb = alloc_skb(MAX_DCCP_HEADER, GFP_ATOMIC); 465 struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
466 GFP_ATOMIC);
435 467
436 if (skb == NULL) { 468 if (skb == NULL) {
437 inet_csk_schedule_ack(sk); 469 inet_csk_schedule_ack(sk);
@@ -443,7 +475,7 @@ void dccp_send_ack(struct sock *sk)
443 } 475 }
444 476
445 /* Reserve space for headers */ 477 /* Reserve space for headers */
446 skb_reserve(skb, MAX_DCCP_HEADER); 478 skb_reserve(skb, sk->sk_prot->max_header);
447 skb->csum = 0; 479 skb->csum = 0;
448 DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK; 480 DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
449 dccp_transmit_skb(sk, skb); 481 dccp_transmit_skb(sk, skb);
@@ -490,14 +522,14 @@ void dccp_send_sync(struct sock *sk, const u64 seq,
490 * dccp_transmit_skb() will set the ownership to this 522 * dccp_transmit_skb() will set the ownership to this
491 * sock. 523 * sock.
492 */ 524 */
493 struct sk_buff *skb = alloc_skb(MAX_DCCP_HEADER, GFP_ATOMIC); 525 struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
494 526
495 if (skb == NULL) 527 if (skb == NULL)
496 /* FIXME: how to make sure the sync is sent? */ 528 /* FIXME: how to make sure the sync is sent? */
497 return; 529 return;
498 530
499 /* Reserve space for headers and prepare control bits. */ 531 /* Reserve space for headers and prepare control bits. */
500 skb_reserve(skb, MAX_DCCP_HEADER); 532 skb_reserve(skb, sk->sk_prot->max_header);
501 skb->csum = 0; 533 skb->csum = 0;
502 DCCP_SKB_CB(skb)->dccpd_type = pkt_type; 534 DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
503 DCCP_SKB_CB(skb)->dccpd_seq = seq; 535 DCCP_SKB_CB(skb)->dccpd_seq = seq;
@@ -505,6 +537,8 @@ void dccp_send_sync(struct sock *sk, const u64 seq,
505 dccp_transmit_skb(sk, skb); 537 dccp_transmit_skb(sk, skb);
506} 538}
507 539
540EXPORT_SYMBOL_GPL(dccp_send_sync);
541
508/* 542/*
509 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This 543 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
510 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under 544 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
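Several hunks in this file replace hand-summed MAX_DCCP_HEADER / DCCP_MAX_OPT_LEN constants with sk->sk_prot->max_header when allocating and reserving skb headroom: reserve the worst case up front, then build the packet backwards with skb_push() as options and the DCCP header are added. A tiny userspace model of that reserve-then-push discipline (not the real skb API):

#include <assert.h>
#include <string.h>

#define MAX_HEADER_ROOM 128

struct pkt {
        unsigned char buf[MAX_HEADER_ROOM + 256];
        unsigned char *data;            /* current start of packet */
        unsigned char *tail;            /* end of payload */
};

static void pkt_reserve(struct pkt *p, size_t headroom)
{
        p->data = p->tail = p->buf + headroom;
}

/* grow the packet towards the front, like skb_push() */
static unsigned char *pkt_push(struct pkt *p, size_t len)
{
        assert((size_t)(p->data - p->buf) >= len);      /* headroom must suffice */
        p->data -= len;
        return p->data;
}

int main(void)
{
        struct pkt p;

        pkt_reserve(&p, MAX_HEADER_ROOM);               /* sk->sk_prot->max_header in the kernel */
        memcpy(pkt_push(&p, 4), "opts", 4);             /* options go in first...               */
        memcpy(pkt_push(&p, 12), "dccp-header!", 12);   /* ...then the header in front of them  */
        assert(p.tail - p.data == 16);
        return 0;
}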
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 65b11ea90d85..d4b293e16283 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -23,9 +23,7 @@
23#include <linux/random.h> 23#include <linux/random.h>
24#include <net/checksum.h> 24#include <net/checksum.h>
25 25
26#include <net/inet_common.h>
27#include <net/inet_sock.h> 26#include <net/inet_sock.h>
28#include <net/protocol.h>
29#include <net/sock.h> 27#include <net/sock.h>
30#include <net/xfrm.h> 28#include <net/xfrm.h>
31 29
@@ -37,6 +35,7 @@
37 35
38#include "ccid.h" 36#include "ccid.h"
39#include "dccp.h" 37#include "dccp.h"
38#include "feat.h"
40 39
41DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly; 40DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;
42 41
@@ -46,12 +45,66 @@ atomic_t dccp_orphan_count = ATOMIC_INIT(0);
46 45
47EXPORT_SYMBOL_GPL(dccp_orphan_count); 46EXPORT_SYMBOL_GPL(dccp_orphan_count);
48 47
49static struct net_protocol dccp_protocol = { 48struct inet_hashinfo __cacheline_aligned dccp_hashinfo = {
50 .handler = dccp_v4_rcv, 49 .lhash_lock = RW_LOCK_UNLOCKED,
51 .err_handler = dccp_v4_err, 50 .lhash_users = ATOMIC_INIT(0),
52 .no_policy = 1, 51 .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(dccp_hashinfo.lhash_wait),
53}; 52};
54 53
54EXPORT_SYMBOL_GPL(dccp_hashinfo);
55
56void dccp_set_state(struct sock *sk, const int state)
57{
58 const int oldstate = sk->sk_state;
59
60 dccp_pr_debug("%s(%p) %-10.10s -> %s\n",
61 dccp_role(sk), sk,
62 dccp_state_name(oldstate), dccp_state_name(state));
63 WARN_ON(state == oldstate);
64
65 switch (state) {
66 case DCCP_OPEN:
67 if (oldstate != DCCP_OPEN)
68 DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
69 break;
70
71 case DCCP_CLOSED:
72 if (oldstate == DCCP_CLOSING || oldstate == DCCP_OPEN)
73 DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);
74
75 sk->sk_prot->unhash(sk);
76 if (inet_csk(sk)->icsk_bind_hash != NULL &&
77 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
78 inet_put_port(&dccp_hashinfo, sk);
79 /* fall through */
80 default:
81 if (oldstate == DCCP_OPEN)
82 DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
83 }
84
85 /* Change state AFTER socket is unhashed to avoid closed
86 * socket sitting in hash tables.
87 */
88 sk->sk_state = state;
89}
90
91EXPORT_SYMBOL_GPL(dccp_set_state);
92
93void dccp_done(struct sock *sk)
94{
95 dccp_set_state(sk, DCCP_CLOSED);
96 dccp_clear_xmit_timers(sk);
97
98 sk->sk_shutdown = SHUTDOWN_MASK;
99
100 if (!sock_flag(sk, SOCK_DEAD))
101 sk->sk_state_change(sk);
102 else
103 inet_csk_destroy_sock(sk);
104}
105
106EXPORT_SYMBOL_GPL(dccp_done);
107
55const char *dccp_packet_name(const int type) 108const char *dccp_packet_name(const int type)
56{ 109{
57 static const char *dccp_packet_names[] = { 110 static const char *dccp_packet_names[] = {
@@ -96,6 +149,120 @@ const char *dccp_state_name(const int state)
96 149
97EXPORT_SYMBOL_GPL(dccp_state_name); 150EXPORT_SYMBOL_GPL(dccp_state_name);
98 151
152void dccp_hash(struct sock *sk)
153{
154 inet_hash(&dccp_hashinfo, sk);
155}
156
157EXPORT_SYMBOL_GPL(dccp_hash);
158
159void dccp_unhash(struct sock *sk)
160{
161 inet_unhash(&dccp_hashinfo, sk);
162}
163
164EXPORT_SYMBOL_GPL(dccp_unhash);
165
166int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
167{
168 struct dccp_sock *dp = dccp_sk(sk);
169 struct dccp_minisock *dmsk = dccp_msk(sk);
170 struct inet_connection_sock *icsk = inet_csk(sk);
171
172 dccp_minisock_init(&dp->dccps_minisock);
173 do_gettimeofday(&dp->dccps_epoch);
174
175 /*
176 * FIXME: We're hardcoding the CCID, and doing this at this point makes
177 * the listening (master) sock get CCID control blocks, which is not
178 * necessary, but for now, to not mess with the test userspace apps,
179 * let's leave it here; later the real solution is to do this in a
180 * setsockopt(CCIDs-I-want/accept). -acme
181 */
182 if (likely(ctl_sock_initialized)) {
183 int rc = dccp_feat_init(dmsk);
184
185 if (rc)
186 return rc;
187
188 if (dmsk->dccpms_send_ack_vector) {
189 dp->dccps_hc_rx_ackvec = dccp_ackvec_alloc(GFP_KERNEL);
190 if (dp->dccps_hc_rx_ackvec == NULL)
191 return -ENOMEM;
192 }
193 dp->dccps_hc_rx_ccid = ccid_hc_rx_new(dmsk->dccpms_rx_ccid,
194 sk, GFP_KERNEL);
195 dp->dccps_hc_tx_ccid = ccid_hc_tx_new(dmsk->dccpms_tx_ccid,
196 sk, GFP_KERNEL);
197 if (unlikely(dp->dccps_hc_rx_ccid == NULL ||
198 dp->dccps_hc_tx_ccid == NULL)) {
199 ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
200 ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
201 if (dmsk->dccpms_send_ack_vector) {
202 dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
203 dp->dccps_hc_rx_ackvec = NULL;
204 }
205 dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
206 return -ENOMEM;
207 }
208 } else {
209 /* control socket doesn't need feat nego */
210 INIT_LIST_HEAD(&dmsk->dccpms_pending);
211 INIT_LIST_HEAD(&dmsk->dccpms_conf);
212 }
213
214 dccp_init_xmit_timers(sk);
215 icsk->icsk_rto = DCCP_TIMEOUT_INIT;
216 sk->sk_state = DCCP_CLOSED;
217 sk->sk_write_space = dccp_write_space;
218 icsk->icsk_sync_mss = dccp_sync_mss;
219 dp->dccps_mss_cache = 536;
220 dp->dccps_role = DCCP_ROLE_UNDEFINED;
221 dp->dccps_service = DCCP_SERVICE_INVALID_VALUE;
222 dp->dccps_l_ack_ratio = dp->dccps_r_ack_ratio = 1;
223
224 return 0;
225}
226
227EXPORT_SYMBOL_GPL(dccp_init_sock);
228
229int dccp_destroy_sock(struct sock *sk)
230{
231 struct dccp_sock *dp = dccp_sk(sk);
232 struct dccp_minisock *dmsk = dccp_msk(sk);
233
234 /*
235 * DCCP doesn't use sk_write_queue, just sk_send_head
236 * for retransmissions
237 */
238 if (sk->sk_send_head != NULL) {
239 kfree_skb(sk->sk_send_head);
240 sk->sk_send_head = NULL;
241 }
242
243 /* Clean up a referenced DCCP bind bucket. */
244 if (inet_csk(sk)->icsk_bind_hash != NULL)
245 inet_put_port(&dccp_hashinfo, sk);
246
247 kfree(dp->dccps_service_list);
248 dp->dccps_service_list = NULL;
249
250 if (dmsk->dccpms_send_ack_vector) {
251 dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
252 dp->dccps_hc_rx_ackvec = NULL;
253 }
254 ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
255 ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
256 dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
257
258 /* clean up feature negotiation state */
259 dccp_feat_clean(dmsk);
260
261 return 0;
262}
263
264EXPORT_SYMBOL_GPL(dccp_destroy_sock);
265
99static inline int dccp_listen_start(struct sock *sk) 266static inline int dccp_listen_start(struct sock *sk)
100{ 267{
101 struct dccp_sock *dp = dccp_sk(sk); 268 struct dccp_sock *dp = dccp_sk(sk);
@@ -220,7 +387,7 @@ int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
220 387
221EXPORT_SYMBOL_GPL(dccp_ioctl); 388EXPORT_SYMBOL_GPL(dccp_ioctl);
222 389
223static int dccp_setsockopt_service(struct sock *sk, const u32 service, 390static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
224 char __user *optval, int optlen) 391 char __user *optval, int optlen)
225{ 392{
226 struct dccp_sock *dp = dccp_sk(sk); 393 struct dccp_sock *dp = dccp_sk(sk);
@@ -255,18 +422,46 @@ static int dccp_setsockopt_service(struct sock *sk, const u32 service,
255 return 0; 422 return 0;
256} 423}
257 424
258int dccp_setsockopt(struct sock *sk, int level, int optname, 425/* byte 1 is feature. the rest is the preference list */
259 char __user *optval, int optlen) 426static int dccp_setsockopt_change(struct sock *sk, int type,
427 struct dccp_so_feat __user *optval)
428{
429 struct dccp_so_feat opt;
430 u8 *val;
431 int rc;
432
433 if (copy_from_user(&opt, optval, sizeof(opt)))
434 return -EFAULT;
435
436 val = kmalloc(opt.dccpsf_len, GFP_KERNEL);
437 if (!val)
438 return -ENOMEM;
439
440 if (copy_from_user(val, opt.dccpsf_val, opt.dccpsf_len)) {
441 rc = -EFAULT;
442 goto out_free_val;
443 }
444
445 rc = dccp_feat_change(dccp_msk(sk), type, opt.dccpsf_feat,
446 val, opt.dccpsf_len, GFP_KERNEL);
447 if (rc)
448 goto out_free_val;
449
450out:
451 return rc;
452
453out_free_val:
454 kfree(val);
455 goto out;
456}
457
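do_dccp_setsockopt() above makes feature negotiation reachable from userspace through DCCP_SOCKOPT_CHANGE_L/_R and struct dccp_so_feat. A hedged usage sketch; the numeric fallbacks, the locally mirrored structure and the CCID preference list are assumptions of the sketch, not guarantees of the ABI:

#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef SOL_DCCP
#define SOL_DCCP              269
#endif
#ifndef DCCP_SOCKOPT_CHANGE_L
#define DCCP_SOCKOPT_CHANGE_L 3
#endif

struct dccp_so_feat {                   /* mirrors the structure used above */
        uint8_t  dccpsf_feat;
        uint8_t *dccpsf_val;
        uint8_t  dccpsf_len;
};

int request_ccid(int fd)
{
        uint8_t prefs[] = { 2, 3 };     /* prefer CCID 2, accept CCID 3 */
        struct dccp_so_feat opt = {
                .dccpsf_feat = 1,       /* CCID feature number, assumed value */
                .dccpsf_val  = prefs,
                .dccpsf_len  = sizeof(prefs),
        };

        /* must be issued before connect()/listen(), while negotiation is open */
        if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_CHANGE_L, &opt, sizeof(opt)) < 0) {
                perror("DCCP_SOCKOPT_CHANGE_L");
                return -1;
        }
        return 0;
}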
458static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
459 char __user *optval, int optlen)
260{ 460{
261 struct dccp_sock *dp; 461 struct dccp_sock *dp;
262 int err; 462 int err;
263 int val; 463 int val;
264 464
265 if (level != SOL_DCCP)
266 return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
267 optname, optval,
268 optlen);
269
270 if (optlen < sizeof(int)) 465 if (optlen < sizeof(int))
271 return -EINVAL; 466 return -EINVAL;
272 467
@@ -284,6 +479,25 @@ int dccp_setsockopt(struct sock *sk, int level, int optname,
284 case DCCP_SOCKOPT_PACKET_SIZE: 479 case DCCP_SOCKOPT_PACKET_SIZE:
285 dp->dccps_packet_size = val; 480 dp->dccps_packet_size = val;
286 break; 481 break;
482
483 case DCCP_SOCKOPT_CHANGE_L:
484 if (optlen != sizeof(struct dccp_so_feat))
485 err = -EINVAL;
486 else
487 err = dccp_setsockopt_change(sk, DCCPO_CHANGE_L,
488 (struct dccp_so_feat *)
489 optval);
490 break;
491
492 case DCCP_SOCKOPT_CHANGE_R:
493 if (optlen != sizeof(struct dccp_so_feat))
494 err = -EINVAL;
495 else
496 err = dccp_setsockopt_change(sk, DCCPO_CHANGE_R,
497 (struct dccp_so_feat *)
498 optval);
499 break;
500
287 default: 501 default:
288 err = -ENOPROTOOPT; 502 err = -ENOPROTOOPT;
289 break; 503 break;
@@ -293,10 +507,33 @@ int dccp_setsockopt(struct sock *sk, int level, int optname,
293 return err; 507 return err;
294} 508}
295 509
510int dccp_setsockopt(struct sock *sk, int level, int optname,
511 char __user *optval, int optlen)
512{
513 if (level != SOL_DCCP)
514 return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
515 optname, optval,
516 optlen);
517 return do_dccp_setsockopt(sk, level, optname, optval, optlen);
518}
519
296EXPORT_SYMBOL_GPL(dccp_setsockopt); 520EXPORT_SYMBOL_GPL(dccp_setsockopt);
297 521
522#ifdef CONFIG_COMPAT
523int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
524 char __user *optval, int optlen)
525{
526 if (level != SOL_DCCP)
527 return inet_csk_compat_setsockopt(sk, level, optname,
528 optval, optlen);
529 return do_dccp_setsockopt(sk, level, optname, optval, optlen);
530}
531
532EXPORT_SYMBOL_GPL(compat_dccp_setsockopt);
533#endif
534
298static int dccp_getsockopt_service(struct sock *sk, int len, 535static int dccp_getsockopt_service(struct sock *sk, int len,
299 u32 __user *optval, 536 __be32 __user *optval,
300 int __user *optlen) 537 int __user *optlen)
301{ 538{
302 const struct dccp_sock *dp = dccp_sk(sk); 539 const struct dccp_sock *dp = dccp_sk(sk);
@@ -326,16 +563,12 @@ out:
326 return err; 563 return err;
327} 564}
328 565
329int dccp_getsockopt(struct sock *sk, int level, int optname, 566static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
330 char __user *optval, int __user *optlen) 567 char __user *optval, int __user *optlen)
331{ 568{
332 struct dccp_sock *dp; 569 struct dccp_sock *dp;
333 int val, len; 570 int val, len;
334 571
335 if (level != SOL_DCCP)
336 return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
337 optname, optval,
338 optlen);
339 if (get_user(len, optlen)) 572 if (get_user(len, optlen))
340 return -EFAULT; 573 return -EFAULT;
341 574
@@ -351,7 +584,7 @@ int dccp_getsockopt(struct sock *sk, int level, int optname,
351 break; 584 break;
352 case DCCP_SOCKOPT_SERVICE: 585 case DCCP_SOCKOPT_SERVICE:
353 return dccp_getsockopt_service(sk, len, 586 return dccp_getsockopt_service(sk, len,
354 (u32 __user *)optval, optlen); 587 (__be32 __user *)optval, optlen);
355 case 128 ... 191: 588 case 128 ... 191:
356 return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname, 589 return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
357 len, (u32 __user *)optval, optlen); 590 len, (u32 __user *)optval, optlen);
@@ -368,8 +601,31 @@ int dccp_getsockopt(struct sock *sk, int level, int optname,
368 return 0; 601 return 0;
369} 602}
370 603
604int dccp_getsockopt(struct sock *sk, int level, int optname,
605 char __user *optval, int __user *optlen)
606{
607 if (level != SOL_DCCP)
608 return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
609 optname, optval,
610 optlen);
611 return do_dccp_getsockopt(sk, level, optname, optval, optlen);
612}
613
371EXPORT_SYMBOL_GPL(dccp_getsockopt); 614EXPORT_SYMBOL_GPL(dccp_getsockopt);
372 615
616#ifdef CONFIG_COMPAT
617int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
618 char __user *optval, int __user *optlen)
619{
620 if (level != SOL_DCCP)
621 return inet_csk_compat_getsockopt(sk, level, optname,
622 optval, optlen);
623 return do_dccp_getsockopt(sk, level, optname, optval, optlen);
624}
625
626EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
627#endif
628
373int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, 629int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
374 size_t len) 630 size_t len)
375{ 631{
@@ -679,84 +935,7 @@ void dccp_shutdown(struct sock *sk, int how)
679 935
680EXPORT_SYMBOL_GPL(dccp_shutdown); 936EXPORT_SYMBOL_GPL(dccp_shutdown);
681 937
682static const struct proto_ops inet_dccp_ops = { 938static int __init dccp_mib_init(void)
683 .family = PF_INET,
684 .owner = THIS_MODULE,
685 .release = inet_release,
686 .bind = inet_bind,
687 .connect = inet_stream_connect,
688 .socketpair = sock_no_socketpair,
689 .accept = inet_accept,
690 .getname = inet_getname,
691 /* FIXME: work on tcp_poll to rename it to inet_csk_poll */
692 .poll = dccp_poll,
693 .ioctl = inet_ioctl,
694 /* FIXME: work on inet_listen to rename it to sock_common_listen */
695 .listen = inet_dccp_listen,
696 .shutdown = inet_shutdown,
697 .setsockopt = sock_common_setsockopt,
698 .getsockopt = sock_common_getsockopt,
699 .sendmsg = inet_sendmsg,
700 .recvmsg = sock_common_recvmsg,
701 .mmap = sock_no_mmap,
702 .sendpage = sock_no_sendpage,
703};
704
705extern struct net_proto_family inet_family_ops;
706
707static struct inet_protosw dccp_v4_protosw = {
708 .type = SOCK_DCCP,
709 .protocol = IPPROTO_DCCP,
710 .prot = &dccp_prot,
711 .ops = &inet_dccp_ops,
712 .capability = -1,
713 .no_check = 0,
714 .flags = INET_PROTOSW_ICSK,
715};
716
717/*
718 * This is the global socket data structure used for responding to
719 * the Out-of-the-blue (OOTB) packets. A control sock will be created
720 * for this socket at the initialization time.
721 */
722struct socket *dccp_ctl_socket;
723
724static char dccp_ctl_socket_err_msg[] __initdata =
725 KERN_ERR "DCCP: Failed to create the control socket.\n";
726
727static int __init dccp_ctl_sock_init(void)
728{
729 int rc = sock_create_kern(PF_INET, SOCK_DCCP, IPPROTO_DCCP,
730 &dccp_ctl_socket);
731 if (rc < 0)
732 printk(dccp_ctl_socket_err_msg);
733 else {
734 dccp_ctl_socket->sk->sk_allocation = GFP_ATOMIC;
735 inet_sk(dccp_ctl_socket->sk)->uc_ttl = -1;
736
737 /* Unhash it so that IP input processing does not even
738 * see it, we do not wish this socket to see incoming
739 * packets.
740 */
741 dccp_ctl_socket->sk->sk_prot->unhash(dccp_ctl_socket->sk);
742 }
743
744 return rc;
745}
746
747#ifdef CONFIG_IP_DCCP_UNLOAD_HACK
748void dccp_ctl_sock_exit(void)
749{
750 if (dccp_ctl_socket != NULL) {
751 sock_release(dccp_ctl_socket);
752 dccp_ctl_socket = NULL;
753 }
754}
755
756EXPORT_SYMBOL_GPL(dccp_ctl_sock_exit);
757#endif
758
759static int __init init_dccp_v4_mibs(void)
760{ 939{
761 int rc = -ENOMEM; 940 int rc = -ENOMEM;
762 941
@@ -778,6 +957,13 @@ out_free_one:
778 957
779} 958}
780 959
960static void dccp_mib_exit(void)
961{
962 free_percpu(dccp_statistics[0]);
963 free_percpu(dccp_statistics[1]);
964 dccp_statistics[0] = dccp_statistics[1] = NULL;
965}
966
781static int thash_entries; 967static int thash_entries;
782module_param(thash_entries, int, 0444); 968module_param(thash_entries, int, 0444);
783MODULE_PARM_DESC(thash_entries, "Number of ehash buckets"); 969MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");
@@ -794,17 +980,14 @@ static int __init dccp_init(void)
794{ 980{
795 unsigned long goal; 981 unsigned long goal;
796 int ehash_order, bhash_order, i; 982 int ehash_order, bhash_order, i;
797 int rc = proto_register(&dccp_prot, 1); 983 int rc = -ENOBUFS;
798
799 if (rc)
800 goto out;
801 984
802 dccp_hashinfo.bind_bucket_cachep = 985 dccp_hashinfo.bind_bucket_cachep =
803 kmem_cache_create("dccp_bind_bucket", 986 kmem_cache_create("dccp_bind_bucket",
804 sizeof(struct inet_bind_bucket), 0, 987 sizeof(struct inet_bind_bucket), 0,
805 SLAB_HWCACHE_ALIGN, NULL, NULL); 988 SLAB_HWCACHE_ALIGN, NULL, NULL);
806 if (!dccp_hashinfo.bind_bucket_cachep) 989 if (!dccp_hashinfo.bind_bucket_cachep)
807 goto out_proto_unregister; 990 goto out;
808 991
809 /* 992 /*
810 * Size and allocate the main established and bind bucket 993 * Size and allocate the main established and bind bucket
@@ -866,27 +1049,23 @@ static int __init dccp_init(void)
866 INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain); 1049 INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
867 } 1050 }
868 1051
869 if (init_dccp_v4_mibs()) 1052 rc = dccp_mib_init();
1053 if (rc)
870 goto out_free_dccp_bhash; 1054 goto out_free_dccp_bhash;
871 1055
872 rc = -EAGAIN; 1056 rc = dccp_ackvec_init();
873 if (inet_add_protocol(&dccp_protocol, IPPROTO_DCCP)) 1057 if (rc)
874 goto out_free_dccp_v4_mibs; 1058 goto out_free_dccp_mib;
875
876 inet_register_protosw(&dccp_v4_protosw);
877 1059
878 rc = dccp_ctl_sock_init(); 1060 rc = dccp_sysctl_init();
879 if (rc) 1061 if (rc)
880 goto out_unregister_protosw; 1062 goto out_ackvec_exit;
881out: 1063out:
882 return rc; 1064 return rc;
883out_unregister_protosw: 1065out_ackvec_exit:
884 inet_unregister_protosw(&dccp_v4_protosw); 1066 dccp_ackvec_exit();
885 inet_del_protocol(&dccp_protocol, IPPROTO_DCCP); 1067out_free_dccp_mib:
886out_free_dccp_v4_mibs: 1068 dccp_mib_exit();
887 free_percpu(dccp_statistics[0]);
888 free_percpu(dccp_statistics[1]);
889 dccp_statistics[0] = dccp_statistics[1] = NULL;
890out_free_dccp_bhash: 1069out_free_dccp_bhash:
891 free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order); 1070 free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
892 dccp_hashinfo.bhash = NULL; 1071 dccp_hashinfo.bhash = NULL;
@@ -896,23 +1075,12 @@ out_free_dccp_ehash:
896out_free_bind_bucket_cachep: 1075out_free_bind_bucket_cachep:
897 kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); 1076 kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
898 dccp_hashinfo.bind_bucket_cachep = NULL; 1077 dccp_hashinfo.bind_bucket_cachep = NULL;
899out_proto_unregister:
900 proto_unregister(&dccp_prot);
901 goto out; 1078 goto out;
902} 1079}
903 1080
904static const char dccp_del_proto_err_msg[] __exitdata =
905 KERN_ERR "can't remove dccp net_protocol\n";
906
907static void __exit dccp_fini(void) 1081static void __exit dccp_fini(void)
908{ 1082{
909 inet_unregister_protosw(&dccp_v4_protosw); 1083 dccp_mib_exit();
910
911 if (inet_del_protocol(&dccp_protocol, IPPROTO_DCCP) < 0)
912 printk(dccp_del_proto_err_msg);
913
914 free_percpu(dccp_statistics[0]);
915 free_percpu(dccp_statistics[1]);
916 free_pages((unsigned long)dccp_hashinfo.bhash, 1084 free_pages((unsigned long)dccp_hashinfo.bhash,
917 get_order(dccp_hashinfo.bhash_size * 1085 get_order(dccp_hashinfo.bhash_size *
918 sizeof(struct inet_bind_hashbucket))); 1086 sizeof(struct inet_bind_hashbucket)));
@@ -920,19 +1088,13 @@ static void __exit dccp_fini(void)
920 get_order(dccp_hashinfo.ehash_size * 1088 get_order(dccp_hashinfo.ehash_size *
921 sizeof(struct inet_ehash_bucket))); 1089 sizeof(struct inet_ehash_bucket)));
922 kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); 1090 kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
923 proto_unregister(&dccp_prot); 1091 dccp_ackvec_exit();
1092 dccp_sysctl_exit();
924} 1093}
925 1094
926module_init(dccp_init); 1095module_init(dccp_init);
927module_exit(dccp_fini); 1096module_exit(dccp_fini);
928 1097
929/*
930 * __stringify doesn't likes enums, so use SOCK_DCCP (6) and IPPROTO_DCCP (33)
931 * values directly, Also cover the case where the protocol is not specified,
932 * i.e. net-pf-PF_INET-proto-0-type-SOCK_DCCP
933 */
934MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-33-type-6");
935MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-0-type-6");
936MODULE_LICENSE("GPL"); 1098MODULE_LICENSE("GPL");
937MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>"); 1099MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
938MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol"); 1100MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");
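Aside (not part of the patch): the reworked dccp_init()/dccp_fini() above follow the kernel's usual goto-unwind idiom — bring each subsystem (MIBs, ack vectors, sysctls) up in order and, on failure, tear down only what has already succeeded. A minimal stand-alone sketch of that idiom, using stand-in helpers rather than the kernel functions:

#include <stdio.h>

/* Stand-ins, not the kernel helpers: each init returns 0 on success. */
static int  mib_init(void)     { return 0; }
static void mib_exit(void)     { }
static int  ackvec_init(void)  { return 0; }
static void ackvec_exit(void)  { }
static int  sysctl_init(void)  { return 0; }

static int proto_init(void)
{
	int rc = mib_init();

	if (rc)
		goto out;
	rc = ackvec_init();
	if (rc)
		goto out_mib_exit;
	rc = sysctl_init();
	if (rc)
		goto out_ackvec_exit;
out:
	return rc;
out_ackvec_exit:
	ackvec_exit();	/* undo only what already succeeded ... */
out_mib_exit:
	mib_exit();	/* ... walking back up the init chain */
	goto out;
}

int main(void)
{
	printf("proto_init() = %d\n", proto_init());
	return 0;
}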
diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c
new file mode 100644
index 000000000000..64c89e9c229e
--- /dev/null
+++ b/net/dccp/sysctl.c
@@ -0,0 +1,124 @@
1/*
2 * net/dccp/sysctl.c
3 *
4 * An implementation of the DCCP protocol
5 * Arnaldo Carvalho de Melo <acme@mandriva.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License v2
9 * as published by the Free Software Foundation.
10 */
11
12#include <linux/config.h>
13#include <linux/mm.h>
14#include <linux/sysctl.h>
15
16#ifndef CONFIG_SYSCTL
17#error This file should not be compiled without CONFIG_SYSCTL defined
18#endif
19
20extern int dccp_feat_default_sequence_window;
21extern int dccp_feat_default_rx_ccid;
22extern int dccp_feat_default_tx_ccid;
23extern int dccp_feat_default_ack_ratio;
24extern int dccp_feat_default_send_ack_vector;
25extern int dccp_feat_default_send_ndp_count;
26
27static struct ctl_table dccp_default_table[] = {
28 {
29 .ctl_name = NET_DCCP_DEFAULT_SEQ_WINDOW,
30 .procname = "seq_window",
31 .data = &dccp_feat_default_sequence_window,
32 .maxlen = sizeof(dccp_feat_default_sequence_window),
33 .mode = 0644,
34 .proc_handler = proc_dointvec,
35 },
36 {
37 .ctl_name = NET_DCCP_DEFAULT_RX_CCID,
38 .procname = "rx_ccid",
39 .data = &dccp_feat_default_rx_ccid,
40 .maxlen = sizeof(dccp_feat_default_rx_ccid),
41 .mode = 0644,
42 .proc_handler = proc_dointvec,
43 },
44 {
45 .ctl_name = NET_DCCP_DEFAULT_TX_CCID,
46 .procname = "tx_ccid",
47 .data = &dccp_feat_default_tx_ccid,
48 .maxlen = sizeof(dccp_feat_default_tx_ccid),
49 .mode = 0644,
50 .proc_handler = proc_dointvec,
51 },
52 {
53 .ctl_name = NET_DCCP_DEFAULT_ACK_RATIO,
54 .procname = "ack_ratio",
55 .data = &dccp_feat_default_ack_ratio,
56 .maxlen = sizeof(dccp_feat_default_ack_ratio),
57 .mode = 0644,
58 .proc_handler = proc_dointvec,
59 },
60 {
61 .ctl_name = NET_DCCP_DEFAULT_SEND_ACKVEC,
62 .procname = "send_ackvec",
63 .data = &dccp_feat_default_send_ack_vector,
64 .maxlen = sizeof(dccp_feat_default_send_ack_vector),
65 .mode = 0644,
66 .proc_handler = proc_dointvec,
67 },
68 {
69 .ctl_name = NET_DCCP_DEFAULT_SEND_NDP,
70 .procname = "send_ndp",
71 .data = &dccp_feat_default_send_ndp_count,
72 .maxlen = sizeof(dccp_feat_default_send_ndp_count),
73 .mode = 0644,
74 .proc_handler = proc_dointvec,
75 },
76 { .ctl_name = 0, }
77};
78
79static struct ctl_table dccp_table[] = {
80 {
81 .ctl_name = NET_DCCP_DEFAULT,
82 .procname = "default",
83 .mode = 0555,
84 .child = dccp_default_table,
85 },
86 { .ctl_name = 0, },
87};
88
89static struct ctl_table dccp_dir_table[] = {
90 {
91 .ctl_name = NET_DCCP,
92 .procname = "dccp",
93 .mode = 0555,
94 .child = dccp_table,
95 },
96 { .ctl_name = 0, },
97};
98
99static struct ctl_table dccp_root_table[] = {
100 {
101 .ctl_name = CTL_NET,
102 .procname = "net",
103 .mode = 0555,
104 .child = dccp_dir_table,
105 },
106 { .ctl_name = 0, },
107};
108
109static struct ctl_table_header *dccp_table_header;
110
111int __init dccp_sysctl_init(void)
112{
113 dccp_table_header = register_sysctl_table(dccp_root_table, 1);
114
115 return dccp_table_header != NULL ? 0 : -ENOMEM;
116}
117
118void dccp_sysctl_exit(void)
119{
120 if (dccp_table_header != NULL) {
121 unregister_sysctl_table(dccp_table_header);
122 dccp_table_header = NULL;
123 }
124}
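Aside (not part of the patch): given the procname nesting above (net -> dccp -> default), these knobs should surface as files such as /proc/sys/net/dccp/default/seq_window once the tables are registered. A small user-space illustration, assuming that path exists on the running kernel:

#include <stdio.h>

int main(void)
{
	/* Path inferred from the ctl_table hierarchy above; adjust if the
	 * running kernel exposes it elsewhere. */
	const char *path = "/proc/sys/net/dccp/default/seq_window";
	char buf[64];
	FILE *f = fopen(path, "r");

	if (f == NULL) {
		perror(path);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f) != NULL)
		printf("default seq_window: %s", buf);
	fclose(f);
	return 0;
}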
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index aa34b576e228..5244415e5f18 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -31,7 +31,7 @@ static void dccp_write_err(struct sock *sk)
31 sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; 31 sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
32 sk->sk_error_report(sk); 32 sk->sk_error_report(sk);
33 33
34 dccp_v4_send_reset(sk, DCCP_RESET_CODE_ABORTED); 34 dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
35 dccp_done(sk); 35 dccp_done(sk);
36 DCCP_INC_STATS_BH(DCCP_MIB_ABORTONTIMEOUT); 36 DCCP_INC_STATS_BH(DCCP_MIB_ABORTONTIMEOUT);
37} 37}
@@ -141,6 +141,17 @@ static void dccp_retransmit_timer(struct sock *sk)
141{ 141{
142 struct inet_connection_sock *icsk = inet_csk(sk); 142 struct inet_connection_sock *icsk = inet_csk(sk);
143 143
144	/* The retransmit timer is used for feature negotiation throughout the
145	 * connection. In this case, no packet is retransmitted; rather, an
146	 * ack is generated and the pending changes are placed into its options.
147 */
148 if (sk->sk_send_head == NULL) {
149 dccp_pr_debug("feat negotiation retransmit timeout %p\n", sk);
150 if (sk->sk_state == DCCP_OPEN)
151 dccp_send_ack(sk);
152 goto backoff;
153 }
154
144 /* 155 /*
145 * sk->sk_send_head has to have one skb with 156 * sk->sk_send_head has to have one skb with
146 * DCCP_SKB_CB(skb)->dccpd_type set to one of the retransmittable DCCP 157 * DCCP_SKB_CB(skb)->dccpd_type set to one of the retransmittable DCCP
@@ -177,6 +188,7 @@ static void dccp_retransmit_timer(struct sock *sk)
177 goto out; 188 goto out;
178 } 189 }
179 190
191backoff:
180 icsk->icsk_backoff++; 192 icsk->icsk_backoff++;
181 icsk->icsk_retransmits++; 193 icsk->icsk_retransmits++;
182 194