author     Linus Torvalds <torvalds@woody.osdl.org>  2006-12-11 21:35:17 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>  2006-12-11 21:35:17 -0500
commit     4259cb25d436a79bf6b07d8075423573567c211d
tree       05ae68a795315f4244036358df4c8e0f1034867d
parent     cd39301a68f9604854f3543117b01dc73cbe193f
parent     a49f99ffca57a2eada23b1ac908a405c17859e35
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (32 commits)
  [NETPOLL]: Fix local_bh_enable() warning.
  [IPVS]: Make ip_vs_sync.c <= 80col wide.
  [IPVS]: Use msleep_interruptable() instead of ssleep() aka msleep()
  [HAMRADIO]: Fix baycom_epp.c compile failure.
  [DCCP]: Whitespace cleanups
  [DCCP] ccid3: Fixup some type conversions related to rtts
  [DCCP] ccid3: BUG-FIX - conversion errors
  [DCCP] ccid3: Reorder packet history source file
  [DCCP] ccid3: Reorder packet history header file
  [DCCP] ccid3: Make debug output consistent
  [DCCP] ccid3: Perform history operations only after packet has been sent
  [DCCP] ccid3: TX history - remove unused field
  [DCCP] ccid3: Shift window counter computation
  [DCCP] ccid3: Sanity-check RTT samples
  [DCCP] ccid3: Initialise RTT values
  [DCCP] ccid: Deprecate ccid_hc_tx_insert_options
  [DCCP]: Warn when discarding packet due to internal errors
  [DCCP]: Only deliver to the CCID rx side in charge
  [DCCP]: Simplify TFRC calculation
  [DCCP]: Debug timeval operations
  ...
-rw-r--r--  Documentation/networking/dccp.txt | 6
-rw-r--r--  crypto/sha512.c | 2
-rw-r--r--  drivers/atm/.gitignore | 2
-rw-r--r--  drivers/net/hamradio/baycom_epp.c | 2
-rw-r--r--  drivers/net/wan/Kconfig | 5
-rw-r--r--  include/linux/dccp.h | 26
-rw-r--r--  include/linux/tfrc.h | 8
-rw-r--r--  include/net/ax25.h | 2
-rw-r--r--  net/ax25/ax25_addr.c | 2
-rw-r--r--  net/core/netpoll.c | 30
-rw-r--r--  net/dccp/ackvec.c | 4
-rw-r--r--  net/dccp/ccid.h | 10
-rw-r--r--  net/dccp/ccids/ccid2.c | 12
-rw-r--r--  net/dccp/ccids/ccid3.c | 517
-rw-r--r--  net/dccp/ccids/ccid3.h | 46
-rw-r--r--  net/dccp/ccids/lib/packet_history.c | 219
-rw-r--r--  net/dccp/ccids/lib/packet_history.h | 128
-rw-r--r--  net/dccp/ccids/lib/tfrc.h | 23
-rw-r--r--  net/dccp/ccids/lib/tfrc_equation.c | 28
-rw-r--r--  net/dccp/dccp.h | 3
-rw-r--r--  net/dccp/feat.c | 6
-rw-r--r--  net/dccp/input.c | 47
-rw-r--r--  net/dccp/ipv4.c | 26
-rw-r--r--  net/dccp/ipv6.c | 24
-rw-r--r--  net/dccp/minisocks.c | 2
-rw-r--r--  net/dccp/options.c | 5
-rw-r--r--  net/dccp/output.c | 39
-rw-r--r--  net/dccp/proto.c | 6
-rw-r--r--  net/dccp/timer.c | 14
-rw-r--r--  net/ipv4/ipvs/ip_vs_sync.c | 17
30 files changed, 657 insertions, 604 deletions
diff --git a/Documentation/networking/dccp.txt b/Documentation/networking/dccp.txt
index dda15886bcb..387482e46c4 100644
--- a/Documentation/networking/dccp.txt
+++ b/Documentation/networking/dccp.txt
@@ -19,7 +19,8 @@ for real time and multimedia traffic.
19 19
20It has a base protocol and pluggable congestion control IDs (CCIDs). 20It has a base protocol and pluggable congestion control IDs (CCIDs).
21 21
22It is at experimental RFC status and the homepage for DCCP as a protocol is at: 22It is at proposed standard RFC status and the homepage for DCCP as a protocol
23is at:
23 http://www.read.cs.ucla.edu/dccp/ 24 http://www.read.cs.ucla.edu/dccp/
24 25
25Missing features 26Missing features
@@ -34,9 +35,6 @@ The known bugs are at:
34Socket options 35Socket options
35============== 36==============
36 37
37DCCP_SOCKOPT_PACKET_SIZE is used for CCID3 to set default packet size for
38calculations.
39
40DCCP_SOCKOPT_SERVICE sets the service. The specification mandates use of 38DCCP_SOCKOPT_SERVICE sets the service. The specification mandates use of
41service codes (RFC 4340, sec. 8.1.2); if this socket option is not set, 39service codes (RFC 4340, sec. 8.1.2); if this socket option is not set,
42the socket will fall back to 0 (which means that no meaningful service code 40the socket will fall back to 0 (which means that no meaningful service code
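
A note on using the socket option documented above: the change amounts to a single setsockopt() call in userspace. The sketch below is illustrative only and is not part of this patch; the fallback constants (SOCK_DCCP = 6, IPPROTO_DCCP = 33, SOL_DCCP = 269, DCCP_SOCKOPT_SERVICE = 2) and the service code 42 are assumptions for toolchains whose headers do not yet export the DCCP definitions.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <sys/socket.h>

#ifndef SOCK_DCCP
#define SOCK_DCCP		6
#endif
#ifndef IPPROTO_DCCP
#define IPPROTO_DCCP		33
#endif
#ifndef SOL_DCCP
#define SOL_DCCP		269
#endif
#ifndef DCCP_SOCKOPT_SERVICE
#define DCCP_SOCKOPT_SERVICE	2
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
	uint32_t service = htonl(42);	/* hypothetical service code */

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Must be set before connect()/listen(); an unset option falls
	 * back to service code 0, as the documentation above says. */
	if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
		       &service, sizeof(service)) < 0)
		perror("setsockopt(DCCP_SOCKOPT_SERVICE)");

	return 0;
}
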
diff --git a/crypto/sha512.c b/crypto/sha512.c
index 2dfe7f170b4..15eab9db9be 100644
--- a/crypto/sha512.c
+++ b/crypto/sha512.c
@@ -24,7 +24,7 @@
24 24
25#define SHA384_DIGEST_SIZE 48 25#define SHA384_DIGEST_SIZE 48
26#define SHA512_DIGEST_SIZE 64 26#define SHA512_DIGEST_SIZE 64
27#define SHA384_HMAC_BLOCK_SIZE 96 27#define SHA384_HMAC_BLOCK_SIZE 128
28#define SHA512_HMAC_BLOCK_SIZE 128 28#define SHA512_HMAC_BLOCK_SIZE 128
29 29
30struct sha512_ctx { 30struct sha512_ctx {
diff --git a/drivers/atm/.gitignore b/drivers/atm/.gitignore
index a165b716771..fc0ae5eb05d 100644
--- a/drivers/atm/.gitignore
+++ b/drivers/atm/.gitignore
@@ -2,4 +2,4 @@
2fore200e_mkfirm 2fore200e_mkfirm
3fore200e_pca_fw.c 3fore200e_pca_fw.c
4pca200e.bin 4pca200e.bin
5 5pca200e_ecd.bin2
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 8a83db0fb3b..153b6dc80af 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -1177,7 +1177,7 @@ static void baycom_probe(struct net_device *dev)
1177 dev->mtu = AX25_DEF_PACLEN; /* eth_mtu is the default */ 1177 dev->mtu = AX25_DEF_PACLEN; /* eth_mtu is the default */
1178 dev->addr_len = AX25_ADDR_LEN; /* sizeof an ax.25 address */ 1178 dev->addr_len = AX25_ADDR_LEN; /* sizeof an ax.25 address */
1179 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); 1179 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
1180 memcpy(dev->dev_addr, &ax25_nocall, AX25_ADDR_LEN); 1180 memcpy(dev->dev_addr, &null_ax25_address, AX25_ADDR_LEN);
1181 dev->tx_queue_len = 16; 1181 dev->tx_queue_len = 16;
1182 1182
1183 /* New style flags */ 1183 /* New style flags */
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index d5ab9cf1325..21f76f51c95 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -382,7 +382,7 @@ config SDLA
382 382
383# Wan router core. 383# Wan router core.
384config WAN_ROUTER_DRIVERS 384config WAN_ROUTER_DRIVERS
385 bool "WAN router drivers" 385 tristate "WAN router drivers"
386 depends on WAN && WAN_ROUTER 386 depends on WAN && WAN_ROUTER
387 ---help--- 387 ---help---
388 Connect LAN to WAN via Linux box. 388 Connect LAN to WAN via Linux box.
@@ -393,7 +393,8 @@ config WAN_ROUTER_DRIVERS
393 <file:Documentation/networking/wan-router.txt>. 393 <file:Documentation/networking/wan-router.txt>.
394 394
395 Note that the answer to this question won't directly affect the 395 Note that the answer to this question won't directly affect the
396 kernel: saying N will just cause the configurator to skip all 396 kernel except for how subordinate drivers may be built:
397 saying N will just cause the configurator to skip all
397 the questions about WAN router drivers. 398 the questions about WAN router drivers.
398 399
399 If unsure, say N. 400 If unsure, say N.
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index ed6cc8962d8..1cb054bd93f 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -176,20 +176,20 @@ enum {
176}; 176};
177 177
178/* DCCP features (RFC 4340 section 6.4) */ 178/* DCCP features (RFC 4340 section 6.4) */
179 enum { 179enum {
180 DCCPF_RESERVED = 0, 180 DCCPF_RESERVED = 0,
181 DCCPF_CCID = 1, 181 DCCPF_CCID = 1,
182 DCCPF_SHORT_SEQNOS = 2, /* XXX: not yet implemented */ 182 DCCPF_SHORT_SEQNOS = 2, /* XXX: not yet implemented */
183 DCCPF_SEQUENCE_WINDOW = 3, 183 DCCPF_SEQUENCE_WINDOW = 3,
184 DCCPF_ECN_INCAPABLE = 4, /* XXX: not yet implemented */ 184 DCCPF_ECN_INCAPABLE = 4, /* XXX: not yet implemented */
185 DCCPF_ACK_RATIO = 5, 185 DCCPF_ACK_RATIO = 5,
186 DCCPF_SEND_ACK_VECTOR = 6, 186 DCCPF_SEND_ACK_VECTOR = 6,
187 DCCPF_SEND_NDP_COUNT = 7, 187 DCCPF_SEND_NDP_COUNT = 7,
188 DCCPF_MIN_CSUM_COVER = 8, 188 DCCPF_MIN_CSUM_COVER = 8,
189 DCCPF_DATA_CHECKSUM = 9, /* XXX: not yet implemented */ 189 DCCPF_DATA_CHECKSUM = 9, /* XXX: not yet implemented */
190 /* 10-127 reserved */ 190 /* 10-127 reserved */
191 DCCPF_MIN_CCID_SPECIFIC = 128, 191 DCCPF_MIN_CCID_SPECIFIC = 128,
192 DCCPF_MAX_CCID_SPECIFIC = 255, 192 DCCPF_MAX_CCID_SPECIFIC = 255,
193}; 193};
194 194
195/* this structure is argument to DCCP_SOCKOPT_CHANGE_X */ 195/* this structure is argument to DCCP_SOCKOPT_CHANGE_X */
@@ -427,7 +427,7 @@ struct dccp_service_list {
427}; 427};
428 428
429#define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1) 429#define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1)
430#define DCCP_SERVICE_CODE_IS_ABSENT 0 430#define DCCP_SERVICE_CODE_IS_ABSENT 0
431 431
432static inline int dccp_list_has_service(const struct dccp_service_list *sl, 432static inline int dccp_list_has_service(const struct dccp_service_list *sl,
433 const __be32 service) 433 const __be32 service)
@@ -436,7 +436,7 @@ static inline int dccp_list_has_service(const struct dccp_service_list *sl,
436 u32 i = sl->dccpsl_nr; 436 u32 i = sl->dccpsl_nr;
437 while (i--) 437 while (i--)
438 if (sl->dccpsl_list[i] == service) 438 if (sl->dccpsl_list[i] == service)
439 return 1; 439 return 1;
440 } 440 }
441 return 0; 441 return 0;
442} 442}
@@ -511,7 +511,7 @@ struct dccp_sock {
511 __u8 dccps_hc_tx_insert_options:1; 511 __u8 dccps_hc_tx_insert_options:1;
512 struct timer_list dccps_xmit_timer; 512 struct timer_list dccps_xmit_timer;
513}; 513};
514 514
515static inline struct dccp_sock *dccp_sk(const struct sock *sk) 515static inline struct dccp_sock *dccp_sk(const struct sock *sk)
516{ 516{
517 return (struct dccp_sock *)sk; 517 return (struct dccp_sock *)sk;
diff --git a/include/linux/tfrc.h b/include/linux/tfrc.h
index 31a9b25276f..8a8462b4a4d 100644
--- a/include/linux/tfrc.h
+++ b/include/linux/tfrc.h
@@ -37,10 +37,14 @@ struct tfrc_rx_info {
37 * @tfrctx_p: current loss event rate (5.4) 37 * @tfrctx_p: current loss event rate (5.4)
38 * @tfrctx_rto: estimate of RTO, equals 4*RTT (4.3) 38 * @tfrctx_rto: estimate of RTO, equals 4*RTT (4.3)
39 * @tfrctx_ipi: inter-packet interval (4.6) 39 * @tfrctx_ipi: inter-packet interval (4.6)
40 *
41 * Note: X and X_recv are both maintained in units of 64 * bytes/second. This
42 * enables a finer resolution of sending rates and avoids problems with
43 * integer arithmetic; u32 is not sufficient as scaling consumes 6 bits.
40 */ 44 */
41struct tfrc_tx_info { 45struct tfrc_tx_info {
42 __u32 tfrctx_x; 46 __u64 tfrctx_x;
43 __u32 tfrctx_x_recv; 47 __u64 tfrctx_x_recv;
44 __u32 tfrctx_x_calc; 48 __u32 tfrctx_x_calc;
45 __u32 tfrctx_rtt; 49 __u32 tfrctx_rtt;
46 __u32 tfrctx_p; 50 __u32 tfrctx_p;
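
The comment added above explains why tfrctx_x and tfrctx_x_recv are widened to __u64: keeping rates in units of 64 * bytes/second is a 6-bit fixed-point scheme, so a u32 field would cap the representable rate at 2^26 bytes/s (about 64 Mbyte/s). A standalone arithmetic sketch of how the scaled rate is used (plain integer math for illustration only, not the kernel's scaled_div() helper from net/dccp/ccids/lib/tfrc.h):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t s = 1460;			/* mean packet size in bytes */
	uint64_t x = (uint64_t)12500 << 6;	/* X = 12500 bytes/s, scaled by 2^6 */

	/* t_ipi = s / X in microseconds; the scale factor is stripped with
	 * ">> 6" before dividing, mirroring the ccid3_update_send_time()
	 * change later in this diff. */
	uint64_t t_ipi = (uint64_t)s * 1000000 / (x >> 6);

	printf("t_ipi = %llu us\n", (unsigned long long)t_ipi);	/* 116800 us */
	return 0;
}
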
diff --git a/include/net/ax25.h b/include/net/ax25.h
index e1d116f1192..14b72d868f0 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -285,6 +285,8 @@ extern struct sock *ax25_make_new(struct sock *, struct ax25_dev *);
285extern const ax25_address ax25_bcast; 285extern const ax25_address ax25_bcast;
286extern const ax25_address ax25_defaddr; 286extern const ax25_address ax25_defaddr;
287extern const ax25_address null_ax25_address; 287extern const ax25_address null_ax25_address;
288extern char *ax2asc(char *buf, const ax25_address *);
289extern void asc2ax(ax25_address *addr, const char *callsign);
288extern int ax25cmp(const ax25_address *, const ax25_address *); 290extern int ax25cmp(const ax25_address *, const ax25_address *);
289extern int ax25digicmp(const ax25_digi *, const ax25_digi *); 291extern int ax25digicmp(const ax25_digi *, const ax25_digi *);
290extern const unsigned char *ax25_addr_parse(const unsigned char *, int, 292extern const unsigned char *ax25_addr_parse(const unsigned char *, int,
diff --git a/net/ax25/ax25_addr.c b/net/ax25/ax25_addr.c
index 21a0616152f..97a49c79c60 100644
--- a/net/ax25/ax25_addr.c
+++ b/net/ax25/ax25_addr.c
@@ -83,7 +83,7 @@ EXPORT_SYMBOL(ax2asc);
83 */ 83 */
84void asc2ax(ax25_address *addr, const char *callsign) 84void asc2ax(ax25_address *addr, const char *callsign)
85{ 85{
86 char *s; 86 const char *s;
87 int n; 87 int n;
88 88
89 for (s = callsign, n = 0; n < 6; n++) { 89 for (s = callsign, n = 0; n < 6; n++) {
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 8a271285f2f..a01abdd2d3e 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -242,22 +242,28 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
242 242
243 /* don't get messages out of order, and no recursion */ 243 /* don't get messages out of order, and no recursion */
244 if (skb_queue_len(&npinfo->txq) == 0 && 244 if (skb_queue_len(&npinfo->txq) == 0 &&
245 npinfo->poll_owner != smp_processor_id() && 245 npinfo->poll_owner != smp_processor_id()) {
246 netif_tx_trylock(dev)) { 246 unsigned long flags;
247 /* try until next clock tick */
248 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) {
249 if (!netif_queue_stopped(dev))
250 status = dev->hard_start_xmit(skb, dev);
251 247
252 if (status == NETDEV_TX_OK) 248 local_irq_save(flags);
253 break; 249 if (netif_tx_trylock(dev)) {
250 /* try until next clock tick */
251 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
252 tries > 0; --tries) {
253 if (!netif_queue_stopped(dev))
254 status = dev->hard_start_xmit(skb, dev);
254 255
255 /* tickle device maybe there is some cleanup */ 256 if (status == NETDEV_TX_OK)
256 netpoll_poll(np); 257 break;
257 258
258 udelay(USEC_PER_POLL); 259 /* tickle device maybe there is some cleanup */
260 netpoll_poll(np);
261
262 udelay(USEC_PER_POLL);
263 }
264 netif_tx_unlock(dev);
259 } 265 }
260 netif_tx_unlock(dev); 266 local_irq_restore(flags);
261 } 267 }
262 268
263 if (status != NETDEV_TX_OK) { 269 if (status != NETDEV_TX_OK) {
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index 1f4727ddbdb..a086c6312d3 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -223,7 +223,7 @@ static inline int dccp_ackvec_set_buf_head_state(struct dccp_ackvec *av,
223 gap = -new_head; 223 gap = -new_head;
224 } 224 }
225 new_head += DCCP_MAX_ACKVEC_LEN; 225 new_head += DCCP_MAX_ACKVEC_LEN;
226 } 226 }
227 227
228 av->dccpav_buf_head = new_head; 228 av->dccpav_buf_head = new_head;
229 229
@@ -336,7 +336,7 @@ out_duplicate:
336void dccp_ackvector_print(const u64 ackno, const unsigned char *vector, int len) 336void dccp_ackvector_print(const u64 ackno, const unsigned char *vector, int len)
337{ 337{
338 dccp_pr_debug_cat("ACK vector len=%d, ackno=%llu |", len, 338 dccp_pr_debug_cat("ACK vector len=%d, ackno=%llu |", len,
339 (unsigned long long)ackno); 339 (unsigned long long)ackno);
340 340
341 while (len--) { 341 while (len--) {
342 const u8 state = (*vector & DCCP_ACKVEC_STATE_MASK) >> 6; 342 const u8 state = (*vector & DCCP_ACKVEC_STATE_MASK) >> 6;
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index bcc2d12ae81..c65cb2453e4 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -43,8 +43,6 @@ struct ccid_operations {
43 unsigned char* value); 43 unsigned char* value);
44 int (*ccid_hc_rx_insert_options)(struct sock *sk, 44 int (*ccid_hc_rx_insert_options)(struct sock *sk,
45 struct sk_buff *skb); 45 struct sk_buff *skb);
46 int (*ccid_hc_tx_insert_options)(struct sock *sk,
47 struct sk_buff *skb);
48 void (*ccid_hc_tx_packet_recv)(struct sock *sk, 46 void (*ccid_hc_tx_packet_recv)(struct sock *sk,
49 struct sk_buff *skb); 47 struct sk_buff *skb);
50 int (*ccid_hc_tx_parse_options)(struct sock *sk, 48 int (*ccid_hc_tx_parse_options)(struct sock *sk,
@@ -146,14 +144,6 @@ static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk,
146 return rc; 144 return rc;
147} 145}
148 146
149static inline int ccid_hc_tx_insert_options(struct ccid *ccid, struct sock *sk,
150 struct sk_buff *skb)
151{
152 if (ccid->ccid_ops->ccid_hc_tx_insert_options != NULL)
153 return ccid->ccid_ops->ccid_hc_tx_insert_options(sk, skb);
154 return 0;
155}
156
157static inline int ccid_hc_rx_insert_options(struct ccid *ccid, struct sock *sk, 147static inline int ccid_hc_rx_insert_options(struct ccid *ccid, struct sock *sk,
158 struct sk_buff *skb) 148 struct sk_buff *skb)
159{ 149{
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 2555be8f479..fd38b05d6f7 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -351,7 +351,7 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
351 351
352 while (seqp != hctx->ccid2hctx_seqh) { 352 while (seqp != hctx->ccid2hctx_seqh) {
353 ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n", 353 ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n",
354 (unsigned long long)seqp->ccid2s_seq, 354 (unsigned long long)seqp->ccid2s_seq,
355 seqp->ccid2s_acked, seqp->ccid2s_sent); 355 seqp->ccid2s_acked, seqp->ccid2s_sent);
356 seqp = seqp->ccid2s_next; 356 seqp = seqp->ccid2s_next;
357 } 357 }
@@ -473,7 +473,7 @@ static inline void ccid2_new_ack(struct sock *sk,
473 /* first measurement */ 473 /* first measurement */
474 if (hctx->ccid2hctx_srtt == -1) { 474 if (hctx->ccid2hctx_srtt == -1) {
475 ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n", 475 ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n",
476 r, jiffies, 476 r, jiffies,
477 (unsigned long long)seqp->ccid2s_seq); 477 (unsigned long long)seqp->ccid2s_seq);
478 ccid2_change_srtt(hctx, r); 478 ccid2_change_srtt(hctx, r);
479 hctx->ccid2hctx_rttvar = r >> 1; 479 hctx->ccid2hctx_rttvar = r >> 1;
@@ -518,8 +518,8 @@ static inline void ccid2_new_ack(struct sock *sk,
518 hctx->ccid2hctx_lastrtt = jiffies; 518 hctx->ccid2hctx_lastrtt = jiffies;
519 519
520 ccid2_pr_debug("srtt: %ld rttvar: %ld rto: %ld (HZ=%d) R=%lu\n", 520 ccid2_pr_debug("srtt: %ld rttvar: %ld rto: %ld (HZ=%d) R=%lu\n",
521 hctx->ccid2hctx_srtt, hctx->ccid2hctx_rttvar, 521 hctx->ccid2hctx_srtt, hctx->ccid2hctx_rttvar,
522 hctx->ccid2hctx_rto, HZ, r); 522 hctx->ccid2hctx_rto, HZ, r);
523 hctx->ccid2hctx_sent = 0; 523 hctx->ccid2hctx_sent = 0;
524 } 524 }
525 525
@@ -667,9 +667,9 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
667 /* new packet received or marked */ 667 /* new packet received or marked */
668 if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED && 668 if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED &&
669 !seqp->ccid2s_acked) { 669 !seqp->ccid2s_acked) {
670 if (state == 670 if (state ==
671 DCCP_ACKVEC_STATE_ECN_MARKED) { 671 DCCP_ACKVEC_STATE_ECN_MARKED) {
672 ccid2_congestion_event(hctx, 672 ccid2_congestion_event(hctx,
673 seqp); 673 seqp);
674 } else 674 } else
675 ccid2_new_ack(sk, seqp, 675 ccid2_new_ack(sk, seqp,
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 66a27b9688c..fa6b75372ed 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -41,27 +41,6 @@
41#include "lib/tfrc.h" 41#include "lib/tfrc.h"
42#include "ccid3.h" 42#include "ccid3.h"
43 43
44/*
45 * Reason for maths here is to avoid 32 bit overflow when a is big.
46 * With this we get close to the limit.
47 */
48static u32 usecs_div(const u32 a, const u32 b)
49{
50 const u32 div = a < (UINT_MAX / (USEC_PER_SEC / 10)) ? 10 :
51 a < (UINT_MAX / (USEC_PER_SEC / 50)) ? 50 :
52 a < (UINT_MAX / (USEC_PER_SEC / 100)) ? 100 :
53 a < (UINT_MAX / (USEC_PER_SEC / 500)) ? 500 :
54 a < (UINT_MAX / (USEC_PER_SEC / 1000)) ? 1000 :
55 a < (UINT_MAX / (USEC_PER_SEC / 5000)) ? 5000 :
56 a < (UINT_MAX / (USEC_PER_SEC / 10000)) ? 10000 :
57 a < (UINT_MAX / (USEC_PER_SEC / 50000)) ? 50000 :
58 100000;
59 const u32 tmp = a * (USEC_PER_SEC / div);
60 return (b >= 2 * div) ? tmp / (b / div) : tmp;
61}
62
63
64
65#ifdef CONFIG_IP_DCCP_CCID3_DEBUG 44#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
66static int ccid3_debug; 45static int ccid3_debug;
67#define ccid3_pr_debug(format, a...) DCCP_PR_DEBUG(ccid3_debug, format, ##a) 46#define ccid3_pr_debug(format, a...) DCCP_PR_DEBUG(ccid3_debug, format, ##a)
@@ -108,8 +87,9 @@ static inline void ccid3_update_send_time(struct ccid3_hc_tx_sock *hctx)
108{ 87{
109 timeval_sub_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi); 88 timeval_sub_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);
110 89
111 /* Calculate new t_ipi (inter packet interval) by t_ipi = s / X_inst */ 90 /* Calculate new t_ipi = s / X_inst (X_inst is in 64 * bytes/second) */
112 hctx->ccid3hctx_t_ipi = usecs_div(hctx->ccid3hctx_s, hctx->ccid3hctx_x); 91 hctx->ccid3hctx_t_ipi = scaled_div(hctx->ccid3hctx_s,
92 hctx->ccid3hctx_x >> 6);
113 93
114 /* Update nominal send time with regard to the new t_ipi */ 94 /* Update nominal send time with regard to the new t_ipi */
115 timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi); 95 timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);
@@ -128,40 +108,44 @@ static inline void ccid3_update_send_time(struct ccid3_hc_tx_sock *hctx)
128 * X = max(min(2 * X, 2 * X_recv), s / R); 108 * X = max(min(2 * X, 2 * X_recv), s / R);
129 * tld = now; 109 * tld = now;
130 * 110 *
111 * Note: X and X_recv are both stored in units of 64 * bytes/second, to support
112 * fine-grained resolution of sending rates. This requires scaling by 2^6
113 * throughout the code. Only X_calc is unscaled (in bytes/second).
114 *
131 * If X has changed, we also update the scheduled send time t_now, 115 * If X has changed, we also update the scheduled send time t_now,
132 * the inter-packet interval t_ipi, and the delta value. 116 * the inter-packet interval t_ipi, and the delta value.
133 */ 117 */
134static void ccid3_hc_tx_update_x(struct sock *sk, struct timeval *now) 118static void ccid3_hc_tx_update_x(struct sock *sk, struct timeval *now)
135 119
136{ 120{
137 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); 121 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
138 const __u32 old_x = hctx->ccid3hctx_x; 122 const __u64 old_x = hctx->ccid3hctx_x;
139 123
140 if (hctx->ccid3hctx_p > 0) { 124 if (hctx->ccid3hctx_p > 0) {
141 hctx->ccid3hctx_x_calc = tfrc_calc_x(hctx->ccid3hctx_s, 125
142 hctx->ccid3hctx_rtt, 126 hctx->ccid3hctx_x = min(((__u64)hctx->ccid3hctx_x_calc) << 6,
143 hctx->ccid3hctx_p); 127 hctx->ccid3hctx_x_recv * 2);
144 hctx->ccid3hctx_x = max_t(u32, min(hctx->ccid3hctx_x_calc, 128 hctx->ccid3hctx_x = max(hctx->ccid3hctx_x,
145 hctx->ccid3hctx_x_recv * 2), 129 (((__u64)hctx->ccid3hctx_s) << 6) /
146 hctx->ccid3hctx_s / TFRC_T_MBI); 130 TFRC_T_MBI);
147 131
148 } else if (timeval_delta(now, &hctx->ccid3hctx_t_ld) >= 132 } else if (timeval_delta(now, &hctx->ccid3hctx_t_ld) -
149 hctx->ccid3hctx_rtt) { 133 (suseconds_t)hctx->ccid3hctx_rtt >= 0) {
150 hctx->ccid3hctx_x = max(min(hctx->ccid3hctx_x_recv, 134
151 hctx->ccid3hctx_x ) * 2, 135 hctx->ccid3hctx_x =
152 usecs_div(hctx->ccid3hctx_s, 136 max(2 * min(hctx->ccid3hctx_x, hctx->ccid3hctx_x_recv),
153 hctx->ccid3hctx_rtt) ); 137 scaled_div(((__u64)hctx->ccid3hctx_s) << 6,
138 hctx->ccid3hctx_rtt));
154 hctx->ccid3hctx_t_ld = *now; 139 hctx->ccid3hctx_t_ld = *now;
155 } else 140 }
156 ccid3_pr_debug("Not changing X\n");
157 141
158 if (hctx->ccid3hctx_x != old_x) 142 if (hctx->ccid3hctx_x != old_x)
159 ccid3_update_send_time(hctx); 143 ccid3_update_send_time(hctx);
160} 144}
161 145
162/* 146/*
163 * Track the mean packet size `s' (cf. RFC 4342, 5.3 and RFC 3448, 4.1) 147 * Track the mean packet size `s' (cf. RFC 4342, 5.3 and RFC 3448, 4.1)
164 * @len: DCCP packet payload size in bytes 148 * @len: DCCP packet payload size in bytes
165 */ 149 */
166static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hctx, int len) 150static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hctx, int len)
167{ 151{
@@ -178,6 +162,33 @@ static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hctx, int len)
178 */ 162 */
179} 163}
180 164
165/*
166 * Update Window Counter using the algorithm from [RFC 4342, 8.1].
167 * The algorithm is not applicable if RTT < 4 microseconds.
168 */
169static inline void ccid3_hc_tx_update_win_count(struct ccid3_hc_tx_sock *hctx,
170 struct timeval *now)
171{
172 suseconds_t delta;
173 u32 quarter_rtts;
174
175 if (unlikely(hctx->ccid3hctx_rtt < 4)) /* avoid divide-by-zero */
176 return;
177
178 delta = timeval_delta(now, &hctx->ccid3hctx_t_last_win_count);
179 DCCP_BUG_ON(delta < 0);
180
181 quarter_rtts = (u32)delta / (hctx->ccid3hctx_rtt / 4);
182
183 if (quarter_rtts > 0) {
184 hctx->ccid3hctx_t_last_win_count = *now;
185 hctx->ccid3hctx_last_win_count += min_t(u32, quarter_rtts, 5);
186 hctx->ccid3hctx_last_win_count &= 0xF; /* mod 16 */
187
188 ccid3_pr_debug("now at %#X\n", hctx->ccid3hctx_last_win_count);
189 }
190}
191
181static void ccid3_hc_tx_no_feedback_timer(unsigned long data) 192static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
182{ 193{
183 struct sock *sk = (struct sock *)data; 194 struct sock *sk = (struct sock *)data;
@@ -191,20 +202,20 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
191 goto restart_timer; 202 goto restart_timer;
192 } 203 }
193 204
194 ccid3_pr_debug("%s, sk=%p, state=%s\n", dccp_role(sk), sk, 205 ccid3_pr_debug("%s(%p, state=%s) - entry \n", dccp_role(sk), sk,
195 ccid3_tx_state_name(hctx->ccid3hctx_state)); 206 ccid3_tx_state_name(hctx->ccid3hctx_state));
196 207
197 switch (hctx->ccid3hctx_state) { 208 switch (hctx->ccid3hctx_state) {
198 case TFRC_SSTATE_NO_FBACK: 209 case TFRC_SSTATE_NO_FBACK:
199 /* RFC 3448, 4.4: Halve send rate directly */ 210 /* RFC 3448, 4.4: Halve send rate directly */
200 hctx->ccid3hctx_x = min_t(u32, hctx->ccid3hctx_x / 2, 211 hctx->ccid3hctx_x = max(hctx->ccid3hctx_x / 2,
201 hctx->ccid3hctx_s / TFRC_T_MBI); 212 (((__u64)hctx->ccid3hctx_s) << 6) /
213 TFRC_T_MBI);
202 214
203 ccid3_pr_debug("%s, sk=%p, state=%s, updated tx rate to %d " 215 ccid3_pr_debug("%s(%p, state=%s), updated tx rate to %u "
204 "bytes/s\n", 216 "bytes/s\n", dccp_role(sk), sk,
205 dccp_role(sk), sk,
206 ccid3_tx_state_name(hctx->ccid3hctx_state), 217 ccid3_tx_state_name(hctx->ccid3hctx_state),
207 hctx->ccid3hctx_x); 218 (unsigned)(hctx->ccid3hctx_x >> 6));
208 /* The value of R is still undefined and so we can not recompute 219 /* The value of R is still undefined and so we can not recompute
209 * the timout value. Keep initial value as per [RFC 4342, 5]. */ 220 * the timout value. Keep initial value as per [RFC 4342, 5]. */
210 t_nfb = TFRC_INITIAL_TIMEOUT; 221 t_nfb = TFRC_INITIAL_TIMEOUT;
@@ -213,34 +224,46 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
213 case TFRC_SSTATE_FBACK: 224 case TFRC_SSTATE_FBACK:
214 /* 225 /*
215 * Check if IDLE since last timeout and recv rate is less than 226 * Check if IDLE since last timeout and recv rate is less than
216 * 4 packets per RTT 227 * 4 packets (in units of 64*bytes/sec) per RTT
217 */ 228 */
218 if (!hctx->ccid3hctx_idle || 229 if (!hctx->ccid3hctx_idle ||
219 (hctx->ccid3hctx_x_recv >= 230 (hctx->ccid3hctx_x_recv >= 4 *
220 4 * usecs_div(hctx->ccid3hctx_s, hctx->ccid3hctx_rtt))) { 231 scaled_div(((__u64)hctx->ccid3hctx_s) << 6,
232 hctx->ccid3hctx_rtt))) {
221 struct timeval now; 233 struct timeval now;
222 234
223 ccid3_pr_debug("%s, sk=%p, state=%s, not idle\n", 235 ccid3_pr_debug("%s(%p, state=%s), not idle\n",
224 dccp_role(sk), sk, 236 dccp_role(sk), sk,
225 ccid3_tx_state_name(hctx->ccid3hctx_state)); 237 ccid3_tx_state_name(hctx->ccid3hctx_state));
226 /* Halve sending rate */
227 238
228 /* If (p == 0 || X_calc > 2 * X_recv) 239 /*
240 * Modify the cached value of X_recv [RFC 3448, 4.4]
241 *
242 * If (p == 0 || X_calc > 2 * X_recv)
229 * X_recv = max(X_recv / 2, s / (2 * t_mbi)); 243 * X_recv = max(X_recv / 2, s / (2 * t_mbi));
230 * Else 244 * Else
231 * X_recv = X_calc / 4; 245 * X_recv = X_calc / 4;
246 *
247 * Note that X_recv is scaled by 2^6 while X_calc is not
232 */ 248 */
233 BUG_ON(hctx->ccid3hctx_p && !hctx->ccid3hctx_x_calc); 249 BUG_ON(hctx->ccid3hctx_p && !hctx->ccid3hctx_x_calc);
234 250
235 if (hctx->ccid3hctx_p == 0 || 251 if (hctx->ccid3hctx_p == 0 ||
236 hctx->ccid3hctx_x_calc > 2 * hctx->ccid3hctx_x_recv) 252 (hctx->ccid3hctx_x_calc >
237 hctx->ccid3hctx_x_recv = max_t(u32, hctx->ccid3hctx_x_recv / 2, 253 (hctx->ccid3hctx_x_recv >> 5))) {
238 hctx->ccid3hctx_s / (2 * TFRC_T_MBI)); 254
239 else 255 hctx->ccid3hctx_x_recv =
240 hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc / 4; 256 max(hctx->ccid3hctx_x_recv / 2,
241 257 (((__u64)hctx->ccid3hctx_s) << 6) /
242 /* Update sending rate */ 258 (2 * TFRC_T_MBI));
243 dccp_timestamp(sk, &now); 259
260 if (hctx->ccid3hctx_p == 0)
261 dccp_timestamp(sk, &now);
262 } else {
263 hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc;
264 hctx->ccid3hctx_x_recv <<= 4;
265 }
266 /* Now recalculate X [RFC 3448, 4.3, step (4)] */
244 ccid3_hc_tx_update_x(sk, &now); 267 ccid3_hc_tx_update_x(sk, &now);
245 } 268 }
246 /* 269 /*
@@ -251,7 +274,7 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
251 t_nfb = max(hctx->ccid3hctx_t_rto, 2 * hctx->ccid3hctx_t_ipi); 274 t_nfb = max(hctx->ccid3hctx_t_rto, 2 * hctx->ccid3hctx_t_ipi);
252 break; 275 break;
253 case TFRC_SSTATE_NO_SENT: 276 case TFRC_SSTATE_NO_SENT:
254 DCCP_BUG("Illegal %s state NO_SENT, sk=%p", dccp_role(sk), sk); 277 DCCP_BUG("%s(%p) - Illegal state NO_SENT", dccp_role(sk), sk);
255 /* fall through */ 278 /* fall through */
256 case TFRC_SSTATE_TERM: 279 case TFRC_SSTATE_TERM:
257 goto out; 280 goto out;
@@ -277,9 +300,8 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
277{ 300{
278 struct dccp_sock *dp = dccp_sk(sk); 301 struct dccp_sock *dp = dccp_sk(sk);
279 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); 302 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
280 struct dccp_tx_hist_entry *new_packet;
281 struct timeval now; 303 struct timeval now;
282 long delay; 304 suseconds_t delay;
283 305
284 BUG_ON(hctx == NULL); 306 BUG_ON(hctx == NULL);
285 307
@@ -291,34 +313,21 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
291 if (unlikely(skb->len == 0)) 313 if (unlikely(skb->len == 0))
292 return -EBADMSG; 314 return -EBADMSG;
293 315
294 /* See if last packet allocated was not sent */
295 new_packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
296 if (new_packet == NULL || new_packet->dccphtx_sent) {
297 new_packet = dccp_tx_hist_entry_new(ccid3_tx_hist,
298 GFP_ATOMIC);
299
300 if (unlikely(new_packet == NULL)) {
301 DCCP_WARN("%s, sk=%p, not enough mem to add to history,"
302 "send refused\n", dccp_role(sk), sk);
303 return -ENOBUFS;
304 }
305
306 dccp_tx_hist_add_entry(&hctx->ccid3hctx_hist, new_packet);
307 }
308
309 dccp_timestamp(sk, &now); 316 dccp_timestamp(sk, &now);
310 317
311 switch (hctx->ccid3hctx_state) { 318 switch (hctx->ccid3hctx_state) {
312 case TFRC_SSTATE_NO_SENT: 319 case TFRC_SSTATE_NO_SENT:
313 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, 320 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
314 jiffies + usecs_to_jiffies(TFRC_INITIAL_TIMEOUT)); 321 (jiffies +
322 usecs_to_jiffies(TFRC_INITIAL_TIMEOUT)));
315 hctx->ccid3hctx_last_win_count = 0; 323 hctx->ccid3hctx_last_win_count = 0;
316 hctx->ccid3hctx_t_last_win_count = now; 324 hctx->ccid3hctx_t_last_win_count = now;
317 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK); 325 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
318 326
319 /* Set initial sending rate to 1 packet per second */ 327 /* Set initial sending rate X/s to 1pps (X is scaled by 2^6) */
320 ccid3_hc_tx_update_s(hctx, skb->len); 328 ccid3_hc_tx_update_s(hctx, skb->len);
321 hctx->ccid3hctx_x = hctx->ccid3hctx_s; 329 hctx->ccid3hctx_x = hctx->ccid3hctx_s;
330 hctx->ccid3hctx_x <<= 6;
322 331
323 /* First timeout, according to [RFC 3448, 4.2], is 1 second */ 332 /* First timeout, according to [RFC 3448, 4.2], is 1 second */
324 hctx->ccid3hctx_t_ipi = USEC_PER_SEC; 333 hctx->ccid3hctx_t_ipi = USEC_PER_SEC;
@@ -332,77 +341,57 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
332 case TFRC_SSTATE_FBACK: 341 case TFRC_SSTATE_FBACK:
333 delay = timeval_delta(&hctx->ccid3hctx_t_nom, &now); 342 delay = timeval_delta(&hctx->ccid3hctx_t_nom, &now);
334 /* 343 /*
335 * Scheduling of packet transmissions [RFC 3448, 4.6] 344 * Scheduling of packet transmissions [RFC 3448, 4.6]
336 * 345 *
337 * if (t_now > t_nom - delta) 346 * if (t_now > t_nom - delta)
338 * // send the packet now 347 * // send the packet now
339 * else 348 * else
340 * // send the packet in (t_nom - t_now) milliseconds. 349 * // send the packet in (t_nom - t_now) milliseconds.
341 */ 350 */
342 if (delay - (long)hctx->ccid3hctx_delta >= 0) 351 if (delay - (suseconds_t)hctx->ccid3hctx_delta >= 0)
343 return delay / 1000L; 352 return delay / 1000L;
353
354 ccid3_hc_tx_update_win_count(hctx, &now);
344 break; 355 break;
345 case TFRC_SSTATE_TERM: 356 case TFRC_SSTATE_TERM:
346 DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk); 357 DCCP_BUG("%s(%p) - Illegal state TERM", dccp_role(sk), sk);
347 return -EINVAL; 358 return -EINVAL;
348 } 359 }
349 360
350 /* prepare to send now (add options etc.) */ 361 /* prepare to send now (add options etc.) */
351 dp->dccps_hc_tx_insert_options = 1; 362 dp->dccps_hc_tx_insert_options = 1;
352 new_packet->dccphtx_ccval = DCCP_SKB_CB(skb)->dccpd_ccval = 363 DCCP_SKB_CB(skb)->dccpd_ccval = hctx->ccid3hctx_last_win_count;
353 hctx->ccid3hctx_last_win_count; 364
365 /* set the nominal send time for the next following packet */
354 timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi); 366 timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);
355 367
356 return 0; 368 return 0;
357} 369}
358 370
359static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len) 371static void ccid3_hc_tx_packet_sent(struct sock *sk, int more,
372 unsigned int len)
360{ 373{
361 const struct dccp_sock *dp = dccp_sk(sk);
362 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); 374 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
363 struct timeval now; 375 struct timeval now;
364 unsigned long quarter_rtt;
365 struct dccp_tx_hist_entry *packet; 376 struct dccp_tx_hist_entry *packet;
366 377
367 BUG_ON(hctx == NULL); 378 BUG_ON(hctx == NULL);
368 379
369 dccp_timestamp(sk, &now);
370
371 ccid3_hc_tx_update_s(hctx, len); 380 ccid3_hc_tx_update_s(hctx, len);
372 381
373 packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist); 382 packet = dccp_tx_hist_entry_new(ccid3_tx_hist, GFP_ATOMIC);
374 if (unlikely(packet == NULL)) { 383 if (unlikely(packet == NULL)) {
375 DCCP_WARN("packet doesn't exist in history!\n"); 384 DCCP_CRIT("packet history - out of memory!");
376 return;
377 }
378 if (unlikely(packet->dccphtx_sent)) {
379 DCCP_WARN("no unsent packet in history!\n");
380 return; 385 return;
381 } 386 }
382 packet->dccphtx_tstamp = now; 387 dccp_tx_hist_add_entry(&hctx->ccid3hctx_hist, packet);
383 packet->dccphtx_seqno = dp->dccps_gss;
384 /*
385 * Check if win_count have changed
386 * Algorithm in "8.1. Window Counter Value" in RFC 4342.
387 */
388 quarter_rtt = timeval_delta(&now, &hctx->ccid3hctx_t_last_win_count);
389 if (likely(hctx->ccid3hctx_rtt > 8))
390 quarter_rtt /= hctx->ccid3hctx_rtt / 4;
391
392 if (quarter_rtt > 0) {
393 hctx->ccid3hctx_t_last_win_count = now;
394 hctx->ccid3hctx_last_win_count = (hctx->ccid3hctx_last_win_count +
395 min_t(unsigned long, quarter_rtt, 5)) % 16;
396 ccid3_pr_debug("%s, sk=%p, window changed from "
397 "%u to %u!\n",
398 dccp_role(sk), sk,
399 packet->dccphtx_ccval,
400 hctx->ccid3hctx_last_win_count);
401 }
402 388
403 hctx->ccid3hctx_idle = 0; 389 dccp_timestamp(sk, &now);
404 packet->dccphtx_rtt = hctx->ccid3hctx_rtt; 390 packet->dccphtx_tstamp = now;
405 packet->dccphtx_sent = 1; 391 packet->dccphtx_seqno = dccp_sk(sk)->dccps_gss;
392 packet->dccphtx_rtt = hctx->ccid3hctx_rtt;
393 packet->dccphtx_sent = 1;
394 hctx->ccid3hctx_idle = 0;
406} 395}
407 396
408static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) 397static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
@@ -414,7 +403,7 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
414 struct timeval now; 403 struct timeval now;
415 unsigned long t_nfb; 404 unsigned long t_nfb;
416 u32 pinv; 405 u32 pinv;
417 long r_sample, t_elapsed; 406 suseconds_t r_sample, t_elapsed;
418 407
419 BUG_ON(hctx == NULL); 408 BUG_ON(hctx == NULL);
420 409
@@ -430,44 +419,44 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
430 case TFRC_SSTATE_FBACK: 419 case TFRC_SSTATE_FBACK:
431 /* get packet from history to look up t_recvdata */ 420 /* get packet from history to look up t_recvdata */
432 packet = dccp_tx_hist_find_entry(&hctx->ccid3hctx_hist, 421 packet = dccp_tx_hist_find_entry(&hctx->ccid3hctx_hist,
433 DCCP_SKB_CB(skb)->dccpd_ack_seq); 422 DCCP_SKB_CB(skb)->dccpd_ack_seq);
434 if (unlikely(packet == NULL)) { 423 if (unlikely(packet == NULL)) {
435 DCCP_WARN("%s(%p), seqno %llu(%s) doesn't exist " 424 DCCP_WARN("%s(%p), seqno %llu(%s) doesn't exist "
436 "in history!\n", dccp_role(sk), sk, 425 "in history!\n", dccp_role(sk), sk,
437 (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq, 426 (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
438 dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type)); 427 dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type));
439 return; 428 return;
440 } 429 }
441 430
442 /* Update receive rate */ 431 /* Update receive rate in units of 64 * bytes/second */
443 hctx->ccid3hctx_x_recv = opt_recv->ccid3or_receive_rate; 432 hctx->ccid3hctx_x_recv = opt_recv->ccid3or_receive_rate;
433 hctx->ccid3hctx_x_recv <<= 6;
444 434
445 /* Update loss event rate */ 435 /* Update loss event rate */
446 pinv = opt_recv->ccid3or_loss_event_rate; 436 pinv = opt_recv->ccid3or_loss_event_rate;
447 if (pinv == ~0U || pinv == 0) 437 if (pinv == ~0U || pinv == 0) /* see RFC 4342, 8.5 */
448 hctx->ccid3hctx_p = 0; 438 hctx->ccid3hctx_p = 0;
449 else 439 else /* can not exceed 100% */
450 hctx->ccid3hctx_p = 1000000 / pinv; 440 hctx->ccid3hctx_p = 1000000 / pinv;
451 441
452 dccp_timestamp(sk, &now); 442 dccp_timestamp(sk, &now);
453 443
454 /* 444 /*
455 * Calculate new round trip sample as per [RFC 3448, 4.3] by 445 * Calculate new round trip sample as per [RFC 3448, 4.3] by
456 * R_sample = (now - t_recvdata) - t_elapsed 446 * R_sample = (now - t_recvdata) - t_elapsed
457 */ 447 */
458 r_sample = timeval_delta(&now, &packet->dccphtx_tstamp); 448 r_sample = timeval_delta(&now, &packet->dccphtx_tstamp);
459 t_elapsed = dp->dccps_options_received.dccpor_elapsed_time * 10; 449 t_elapsed = dp->dccps_options_received.dccpor_elapsed_time * 10;
460 450
461 if (unlikely(r_sample <= 0)) { 451 DCCP_BUG_ON(r_sample < 0);
462 DCCP_WARN("WARNING: R_sample (%ld) <= 0!\n", r_sample); 452 if (unlikely(r_sample <= t_elapsed))
463 r_sample = 0; 453 DCCP_WARN("WARNING: r_sample=%dus <= t_elapsed=%dus\n",
464 } else if (unlikely(r_sample <= t_elapsed)) 454 (int)r_sample, (int)t_elapsed);
465 DCCP_WARN("WARNING: r_sample=%ldus <= t_elapsed=%ldus\n",
466 r_sample, t_elapsed);
467 else 455 else
468 r_sample -= t_elapsed; 456 r_sample -= t_elapsed;
457 CCID3_RTT_SANITY_CHECK(r_sample);
469 458
470 /* Update RTT estimate by 459 /* Update RTT estimate by
471 * If (No feedback recv) 460 * If (No feedback recv)
472 * R = R_sample; 461 * R = R_sample;
473 * Else 462 * Else
@@ -476,34 +465,45 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
476 * q is a constant, RFC 3448 recomments 0.9 465 * q is a constant, RFC 3448 recomments 0.9
477 */ 466 */
478 if (hctx->ccid3hctx_state == TFRC_SSTATE_NO_FBACK) { 467 if (hctx->ccid3hctx_state == TFRC_SSTATE_NO_FBACK) {
479 /* Use Larger Initial Windows [RFC 4342, sec. 5] 468 /*
480 * We deviate in that we use `s' instead of `MSS'. */ 469 * Larger Initial Windows [RFC 4342, sec. 5]
481 u16 w_init = max( 4 * hctx->ccid3hctx_s, 470 * We deviate in that we use `s' instead of `MSS'.
482 max(2 * hctx->ccid3hctx_s, 4380)); 471 */
472 __u64 w_init = min(4 * hctx->ccid3hctx_s,
473 max(2 * hctx->ccid3hctx_s, 4380));
483 hctx->ccid3hctx_rtt = r_sample; 474 hctx->ccid3hctx_rtt = r_sample;
484 hctx->ccid3hctx_x = usecs_div(w_init, r_sample); 475 hctx->ccid3hctx_x = scaled_div(w_init << 6, r_sample);
485 hctx->ccid3hctx_t_ld = now; 476 hctx->ccid3hctx_t_ld = now;
486 477
487 ccid3_update_send_time(hctx); 478 ccid3_update_send_time(hctx);
488 479
489 ccid3_pr_debug("%s(%p), s=%u, w_init=%u, " 480 ccid3_pr_debug("%s(%p), s=%u, w_init=%llu, "
490 "R_sample=%ldus, X=%u\n", dccp_role(sk), 481 "R_sample=%dus, X=%u\n", dccp_role(sk),
491 sk, hctx->ccid3hctx_s, w_init, r_sample, 482 sk, hctx->ccid3hctx_s, w_init,
492 hctx->ccid3hctx_x); 483 (int)r_sample,
484 (unsigned)(hctx->ccid3hctx_x >> 6));
493 485
494 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK); 486 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK);
495 } else { 487 } else {
496 hctx->ccid3hctx_rtt = (9 * hctx->ccid3hctx_rtt + 488 hctx->ccid3hctx_rtt = (9 * hctx->ccid3hctx_rtt +
497 (u32)r_sample ) / 10; 489 (u32)r_sample) / 10;
498 490
491 /* Update sending rate (step 4 of [RFC 3448, 4.3]) */
492 if (hctx->ccid3hctx_p > 0)
493 hctx->ccid3hctx_x_calc =
494 tfrc_calc_x(hctx->ccid3hctx_s,
495 hctx->ccid3hctx_rtt,
496 hctx->ccid3hctx_p);
499 ccid3_hc_tx_update_x(sk, &now); 497 ccid3_hc_tx_update_x(sk, &now);
500 498
501 ccid3_pr_debug("%s(%p), RTT=%uus (sample=%ldus), s=%u, " 499 ccid3_pr_debug("%s(%p), RTT=%uus (sample=%dus), s=%u, "
502 "p=%u, X_calc=%u, X=%u\n", dccp_role(sk), 500 "p=%u, X_calc=%u, X_recv=%u, X=%u\n",
503 sk, hctx->ccid3hctx_rtt, r_sample, 501 dccp_role(sk),
502 sk, hctx->ccid3hctx_rtt, (int)r_sample,
504 hctx->ccid3hctx_s, hctx->ccid3hctx_p, 503 hctx->ccid3hctx_s, hctx->ccid3hctx_p,
505 hctx->ccid3hctx_x_calc, 504 hctx->ccid3hctx_x_calc,
506 hctx->ccid3hctx_x); 505 (unsigned)(hctx->ccid3hctx_x_recv >> 6),
506 (unsigned)(hctx->ccid3hctx_x >> 6));
507 } 507 }
508 508
509 /* unschedule no feedback timer */ 509 /* unschedule no feedback timer */
@@ -513,57 +513,48 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
513 dccp_tx_hist_purge_older(ccid3_tx_hist, 513 dccp_tx_hist_purge_older(ccid3_tx_hist,
514 &hctx->ccid3hctx_hist, packet); 514 &hctx->ccid3hctx_hist, packet);
515 /* 515 /*
516 * As we have calculated new ipi, delta, t_nom it is possible that 516 * As we have calculated new ipi, delta, t_nom it is possible
517 * we now can send a packet, so wake up dccp_wait_for_ccid 517 * that we now can send a packet, so wake up dccp_wait_for_ccid
518 */ 518 */
519 sk->sk_write_space(sk); 519 sk->sk_write_space(sk);
520 520
521 /* 521 /*
522 * Update timeout interval for the nofeedback timer. 522 * Update timeout interval for the nofeedback timer.
523 * We use a configuration option to increase the lower bound. 523 * We use a configuration option to increase the lower bound.
524 * This can help avoid triggering the nofeedback timer too often 524 * This can help avoid triggering the nofeedback timer too
525 * ('spinning') on LANs with small RTTs. 525 * often ('spinning') on LANs with small RTTs.
526 */ 526 */
527 hctx->ccid3hctx_t_rto = max_t(u32, 4 * hctx->ccid3hctx_rtt, 527 hctx->ccid3hctx_t_rto = max_t(u32, 4 * hctx->ccid3hctx_rtt,
528 CONFIG_IP_DCCP_CCID3_RTO * 528 CONFIG_IP_DCCP_CCID3_RTO *
529 (USEC_PER_SEC/1000) ); 529 (USEC_PER_SEC/1000));
530 /* 530 /*
531 * Schedule no feedback timer to expire in 531 * Schedule no feedback timer to expire in
532 * max(t_RTO, 2 * s/X) = max(t_RTO, 2 * t_ipi) 532 * max(t_RTO, 2 * s/X) = max(t_RTO, 2 * t_ipi)
533 */ 533 */
534 t_nfb = max(hctx->ccid3hctx_t_rto, 2 * hctx->ccid3hctx_t_ipi); 534 t_nfb = max(hctx->ccid3hctx_t_rto, 2 * hctx->ccid3hctx_t_ipi);
535 535
536 ccid3_pr_debug("%s, sk=%p, Scheduled no feedback timer to " 536 ccid3_pr_debug("%s(%p), Scheduled no feedback timer to "
537 "expire in %lu jiffies (%luus)\n", 537 "expire in %lu jiffies (%luus)\n",
538 dccp_role(sk), sk, 538 dccp_role(sk),
539 usecs_to_jiffies(t_nfb), t_nfb); 539 sk, usecs_to_jiffies(t_nfb), t_nfb);
540 540
541 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, 541 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
542 jiffies + usecs_to_jiffies(t_nfb)); 542 jiffies + usecs_to_jiffies(t_nfb));
543 543
544 /* set idle flag */ 544 /* set idle flag */
545 hctx->ccid3hctx_idle = 1; 545 hctx->ccid3hctx_idle = 1;
546 break; 546 break;
547 case TFRC_SSTATE_NO_SENT: 547 case TFRC_SSTATE_NO_SENT:
548 if (dccp_sk(sk)->dccps_role == DCCP_ROLE_CLIENT) 548 /*
549 DCCP_WARN("Illegal ACK received - no packet sent\n"); 549 * XXX when implementing bidirectional rx/tx check this again
550 */
551 DCCP_WARN("Illegal ACK received - no packet sent\n");
550 /* fall through */ 552 /* fall through */
551 case TFRC_SSTATE_TERM: /* ignore feedback when closing */ 553 case TFRC_SSTATE_TERM: /* ignore feedback when closing */
552 break; 554 break;
553 } 555 }
554} 556}
555 557
556static int ccid3_hc_tx_insert_options(struct sock *sk, struct sk_buff *skb)
557{
558 const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
559
560 BUG_ON(hctx == NULL);
561
562 if (sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)
563 DCCP_SKB_CB(skb)->dccpd_ccval = hctx->ccid3hctx_last_win_count;
564 return 0;
565}
566
567static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option, 558static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
568 unsigned char len, u16 idx, 559 unsigned char len, u16 idx,
569 unsigned char *value) 560 unsigned char *value)
@@ -588,13 +579,14 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
588 switch (option) { 579 switch (option) {
589 case TFRC_OPT_LOSS_EVENT_RATE: 580 case TFRC_OPT_LOSS_EVENT_RATE:
590 if (unlikely(len != 4)) { 581 if (unlikely(len != 4)) {
591 DCCP_WARN("%s, sk=%p, invalid len %d " 582 DCCP_WARN("%s(%p), invalid len %d "
592 "for TFRC_OPT_LOSS_EVENT_RATE\n", 583 "for TFRC_OPT_LOSS_EVENT_RATE\n",
593 dccp_role(sk), sk, len); 584 dccp_role(sk), sk, len);
594 rc = -EINVAL; 585 rc = -EINVAL;
595 } else { 586 } else {
596 opt_recv->ccid3or_loss_event_rate = ntohl(*(__be32 *)value); 587 opt_recv->ccid3or_loss_event_rate =
597 ccid3_pr_debug("%s, sk=%p, LOSS_EVENT_RATE=%u\n", 588 ntohl(*(__be32 *)value);
589 ccid3_pr_debug("%s(%p), LOSS_EVENT_RATE=%u\n",
598 dccp_role(sk), sk, 590 dccp_role(sk), sk,
599 opt_recv->ccid3or_loss_event_rate); 591 opt_recv->ccid3or_loss_event_rate);
600 } 592 }
@@ -602,20 +594,21 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
602 case TFRC_OPT_LOSS_INTERVALS: 594 case TFRC_OPT_LOSS_INTERVALS:
603 opt_recv->ccid3or_loss_intervals_idx = idx; 595 opt_recv->ccid3or_loss_intervals_idx = idx;
604 opt_recv->ccid3or_loss_intervals_len = len; 596 opt_recv->ccid3or_loss_intervals_len = len;
605 ccid3_pr_debug("%s, sk=%p, LOSS_INTERVALS=(%u, %u)\n", 597 ccid3_pr_debug("%s(%p), LOSS_INTERVALS=(%u, %u)\n",
606 dccp_role(sk), sk, 598 dccp_role(sk), sk,
607 opt_recv->ccid3or_loss_intervals_idx, 599 opt_recv->ccid3or_loss_intervals_idx,
608 opt_recv->ccid3or_loss_intervals_len); 600 opt_recv->ccid3or_loss_intervals_len);
609 break; 601 break;
610 case TFRC_OPT_RECEIVE_RATE: 602 case TFRC_OPT_RECEIVE_RATE:
611 if (unlikely(len != 4)) { 603 if (unlikely(len != 4)) {
612 DCCP_WARN("%s, sk=%p, invalid len %d " 604 DCCP_WARN("%s(%p), invalid len %d "
613 "for TFRC_OPT_RECEIVE_RATE\n", 605 "for TFRC_OPT_RECEIVE_RATE\n",
614 dccp_role(sk), sk, len); 606 dccp_role(sk), sk, len);
615 rc = -EINVAL; 607 rc = -EINVAL;
616 } else { 608 } else {
617 opt_recv->ccid3or_receive_rate = ntohl(*(__be32 *)value); 609 opt_recv->ccid3or_receive_rate =
618 ccid3_pr_debug("%s, sk=%p, RECEIVE_RATE=%u\n", 610 ntohl(*(__be32 *)value);
611 ccid3_pr_debug("%s(%p), RECEIVE_RATE=%u\n",
619 dccp_role(sk), sk, 612 dccp_role(sk), sk,
620 opt_recv->ccid3or_receive_rate); 613 opt_recv->ccid3or_receive_rate);
621 } 614 }
@@ -630,10 +623,12 @@ static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk)
630 struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid); 623 struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid);
631 624
632 hctx->ccid3hctx_s = 0; 625 hctx->ccid3hctx_s = 0;
626 hctx->ccid3hctx_rtt = 0;
633 hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT; 627 hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT;
634 INIT_LIST_HEAD(&hctx->ccid3hctx_hist); 628 INIT_LIST_HEAD(&hctx->ccid3hctx_hist);
635 629
636 hctx->ccid3hctx_no_feedback_timer.function = ccid3_hc_tx_no_feedback_timer; 630 hctx->ccid3hctx_no_feedback_timer.function =
631 ccid3_hc_tx_no_feedback_timer;
637 hctx->ccid3hctx_no_feedback_timer.data = (unsigned long)sk; 632 hctx->ccid3hctx_no_feedback_timer.data = (unsigned long)sk;
638 init_timer(&hctx->ccid3hctx_no_feedback_timer); 633 init_timer(&hctx->ccid3hctx_no_feedback_timer);
639 634
@@ -698,8 +693,9 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk)
698 struct dccp_sock *dp = dccp_sk(sk); 693 struct dccp_sock *dp = dccp_sk(sk);
699 struct dccp_rx_hist_entry *packet; 694 struct dccp_rx_hist_entry *packet;
700 struct timeval now; 695 struct timeval now;
696 suseconds_t delta;
701 697
702 ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk); 698 ccid3_pr_debug("%s(%p) - entry \n", dccp_role(sk), sk);
703 699
704 dccp_timestamp(sk, &now); 700 dccp_timestamp(sk, &now);
705 701
@@ -707,21 +703,21 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk)
707 case TFRC_RSTATE_NO_DATA: 703 case TFRC_RSTATE_NO_DATA:
708 hcrx->ccid3hcrx_x_recv = 0; 704 hcrx->ccid3hcrx_x_recv = 0;
709 break; 705 break;
710 case TFRC_RSTATE_DATA: { 706 case TFRC_RSTATE_DATA:
711 const u32 delta = timeval_delta(&now, 707 delta = timeval_delta(&now,
712 &hcrx->ccid3hcrx_tstamp_last_feedback); 708 &hcrx->ccid3hcrx_tstamp_last_feedback);
713 hcrx->ccid3hcrx_x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv, 709 DCCP_BUG_ON(delta < 0);
714 delta); 710 hcrx->ccid3hcrx_x_recv =
715 } 711 scaled_div32(hcrx->ccid3hcrx_bytes_recv, delta);
716 break; 712 break;
717 case TFRC_RSTATE_TERM: 713 case TFRC_RSTATE_TERM:
718 DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk); 714 DCCP_BUG("%s(%p) - Illegal state TERM", dccp_role(sk), sk);
719 return; 715 return;
720 } 716 }
721 717
722 packet = dccp_rx_hist_find_data_packet(&hcrx->ccid3hcrx_hist); 718 packet = dccp_rx_hist_find_data_packet(&hcrx->ccid3hcrx_hist);
723 if (unlikely(packet == NULL)) { 719 if (unlikely(packet == NULL)) {
724 DCCP_WARN("%s, sk=%p, no data packet in history!\n", 720 DCCP_WARN("%s(%p), no data packet in history!\n",
725 dccp_role(sk), sk); 721 dccp_role(sk), sk);
726 return; 722 return;
727 } 723 }
@@ -730,13 +726,19 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk)
730 hcrx->ccid3hcrx_ccval_last_counter = packet->dccphrx_ccval; 726 hcrx->ccid3hcrx_ccval_last_counter = packet->dccphrx_ccval;
731 hcrx->ccid3hcrx_bytes_recv = 0; 727 hcrx->ccid3hcrx_bytes_recv = 0;
732 728
733 /* Convert to multiples of 10us */ 729 /* Elapsed time information [RFC 4340, 13.2] in units of 10 * usecs */
734 hcrx->ccid3hcrx_elapsed_time = 730 delta = timeval_delta(&now, &packet->dccphrx_tstamp);
735 timeval_delta(&now, &packet->dccphrx_tstamp) / 10; 731 DCCP_BUG_ON(delta < 0);
732 hcrx->ccid3hcrx_elapsed_time = delta / 10;
733
736 if (hcrx->ccid3hcrx_p == 0) 734 if (hcrx->ccid3hcrx_p == 0)
737 hcrx->ccid3hcrx_pinv = ~0; 735 hcrx->ccid3hcrx_pinv = ~0U; /* see RFC 4342, 8.5 */
738 else 736 else if (hcrx->ccid3hcrx_p > 1000000) {
737 DCCP_WARN("p (%u) > 100%%\n", hcrx->ccid3hcrx_p);
738 hcrx->ccid3hcrx_pinv = 1; /* use 100% in this case */
739 } else
739 hcrx->ccid3hcrx_pinv = 1000000 / hcrx->ccid3hcrx_p; 740 hcrx->ccid3hcrx_pinv = 1000000 / hcrx->ccid3hcrx_p;
741
740 dp->dccps_hc_rx_insert_options = 1; 742 dp->dccps_hc_rx_insert_options = 1;
741 dccp_send_ack(sk); 743 dccp_send_ack(sk);
742} 744}
@@ -764,9 +766,9 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
764 hcrx->ccid3hcrx_elapsed_time)) || 766 hcrx->ccid3hcrx_elapsed_time)) ||
765 dccp_insert_option_timestamp(sk, skb) || 767 dccp_insert_option_timestamp(sk, skb) ||
766 dccp_insert_option(sk, skb, TFRC_OPT_LOSS_EVENT_RATE, 768 dccp_insert_option(sk, skb, TFRC_OPT_LOSS_EVENT_RATE,
767 &pinv, sizeof(pinv)) || 769 &pinv, sizeof(pinv)) ||
768 dccp_insert_option(sk, skb, TFRC_OPT_RECEIVE_RATE, 770 dccp_insert_option(sk, skb, TFRC_OPT_RECEIVE_RATE,
769 &x_recv, sizeof(x_recv))) 771 &x_recv, sizeof(x_recv)))
770 return -1; 772 return -1;
771 773
772 return 0; 774 return 0;
@@ -780,12 +782,13 @@ static u32 ccid3_hc_rx_calc_first_li(struct sock *sk)
780{ 782{
781 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); 783 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
782 struct dccp_rx_hist_entry *entry, *next, *tail = NULL; 784 struct dccp_rx_hist_entry *entry, *next, *tail = NULL;
783 u32 rtt, delta, x_recv, fval, p, tmp2; 785 u32 x_recv, p;
786 suseconds_t rtt, delta;
784 struct timeval tstamp = { 0, }; 787 struct timeval tstamp = { 0, };
785 int interval = 0; 788 int interval = 0;
786 int win_count = 0; 789 int win_count = 0;
787 int step = 0; 790 int step = 0;
788 u64 tmp1; 791 u64 fval;
789 792
790 list_for_each_entry_safe(entry, next, &hcrx->ccid3hcrx_hist, 793 list_for_each_entry_safe(entry, next, &hcrx->ccid3hcrx_hist,
791 dccphrx_node) { 794 dccphrx_node) {
@@ -810,13 +813,13 @@ static u32 ccid3_hc_rx_calc_first_li(struct sock *sk)
810 } 813 }
811 814
812 if (unlikely(step == 0)) { 815 if (unlikely(step == 0)) {
813 DCCP_WARN("%s, sk=%p, packet history has no data packets!\n", 816 DCCP_WARN("%s(%p), packet history has no data packets!\n",
814 dccp_role(sk), sk); 817 dccp_role(sk), sk);
815 return ~0; 818 return ~0;
816 } 819 }
817 820
818 if (unlikely(interval == 0)) { 821 if (unlikely(interval == 0)) {
819 DCCP_WARN("%s, sk=%p, Could not find a win_count interval > 0." 822 DCCP_WARN("%s(%p), Could not find a win_count interval > 0."
820 "Defaulting to 1\n", dccp_role(sk), sk); 823 "Defaulting to 1\n", dccp_role(sk), sk);
821 interval = 1; 824 interval = 1;
822 } 825 }
@@ -825,41 +828,51 @@ found:
825 DCCP_CRIT("tail is null\n"); 828 DCCP_CRIT("tail is null\n");
826 return ~0; 829 return ~0;
827 } 830 }
828 rtt = timeval_delta(&tstamp, &tail->dccphrx_tstamp) * 4 / interval;
829 ccid3_pr_debug("%s, sk=%p, approximated RTT to %uus\n",
830 dccp_role(sk), sk, rtt);
831 831
832 if (rtt == 0) { 832 delta = timeval_delta(&tstamp, &tail->dccphrx_tstamp);
833 DCCP_WARN("RTT==0, setting to 1\n"); 833 DCCP_BUG_ON(delta < 0);
834 rtt = 1; 834
835 rtt = delta * 4 / interval;
836 ccid3_pr_debug("%s(%p), approximated RTT to %dus\n",
837 dccp_role(sk), sk, (int)rtt);
838
839 /*
840 * Determine the length of the first loss interval via inverse lookup.
841 * Assume that X_recv can be computed by the throughput equation
842 * s
843 * X_recv = --------
844 * R * fval
845 * Find some p such that f(p) = fval; return 1/p [RFC 3448, 6.3.1].
846 */
847 if (rtt == 0) { /* would result in divide-by-zero */
848 DCCP_WARN("RTT==0, returning 1/p = 1\n");
849 return 1000000;
835 } 850 }
836 851
837 dccp_timestamp(sk, &tstamp); 852 dccp_timestamp(sk, &tstamp);
838 delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback); 853 delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback);
839 x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv, delta); 854 DCCP_BUG_ON(delta <= 0);
840 855
841 if (x_recv == 0) 856 x_recv = scaled_div32(hcrx->ccid3hcrx_bytes_recv, delta);
842 x_recv = hcrx->ccid3hcrx_x_recv; 857 if (x_recv == 0) { /* would also trigger divide-by-zero */
843 858 DCCP_WARN("X_recv==0\n");
844 tmp1 = (u64)x_recv * (u64)rtt; 859 if ((x_recv = hcrx->ccid3hcrx_x_recv) == 0) {
845 do_div(tmp1,10000000); 860 DCCP_BUG("stored value of X_recv is zero");
846 tmp2 = (u32)tmp1; 861 return 1000000;
847 862 }
848 if (!tmp2) {
849 DCCP_CRIT("tmp2 = 0, x_recv = %u, rtt =%u\n", x_recv, rtt);
850 return ~0;
851 } 863 }
852 864
853 fval = (hcrx->ccid3hcrx_s * 100000) / tmp2; 865 fval = scaled_div(hcrx->ccid3hcrx_s, rtt);
854 /* do not alter order above or you will get overflow on 32 bit */ 866 fval = scaled_div32(fval, x_recv);
855 p = tfrc_calc_x_reverse_lookup(fval); 867 p = tfrc_calc_x_reverse_lookup(fval);
856 ccid3_pr_debug("%s, sk=%p, receive rate=%u bytes/s, implied " 868
869 ccid3_pr_debug("%s(%p), receive rate=%u bytes/s, implied "
857 "loss rate=%u\n", dccp_role(sk), sk, x_recv, p); 870 "loss rate=%u\n", dccp_role(sk), sk, x_recv, p);
858 871
859 if (p == 0) 872 if (p == 0)
860 return ~0; 873 return ~0;
861 else 874 else
862 return 1000000 / p; 875 return 1000000 / p;
863} 876}
864 877
865static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss) 878static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss)
@@ -913,7 +926,8 @@ static int ccid3_hc_rx_detect_loss(struct sock *sk,
913 struct dccp_rx_hist_entry *packet) 926 struct dccp_rx_hist_entry *packet)
914{ 927{
915 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); 928 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
916 struct dccp_rx_hist_entry *rx_hist = dccp_rx_hist_head(&hcrx->ccid3hcrx_hist); 929 struct dccp_rx_hist_entry *rx_hist =
930 dccp_rx_hist_head(&hcrx->ccid3hcrx_hist);
917 u64 seqno = packet->dccphrx_seqno; 931 u64 seqno = packet->dccphrx_seqno;
918 u64 tmp_seqno; 932 u64 tmp_seqno;
919 int loss = 0; 933 int loss = 0;
@@ -941,7 +955,7 @@ static int ccid3_hc_rx_detect_loss(struct sock *sk,
941 dccp_inc_seqno(&tmp_seqno); 955 dccp_inc_seqno(&tmp_seqno);
942 while (dccp_rx_hist_find_entry(&hcrx->ccid3hcrx_hist, 956 while (dccp_rx_hist_find_entry(&hcrx->ccid3hcrx_hist,
943 tmp_seqno, &ccval)) { 957 tmp_seqno, &ccval)) {
944 hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno; 958 hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno;
945 hcrx->ccid3hcrx_ccval_nonloss = ccval; 959 hcrx->ccid3hcrx_ccval_nonloss = ccval;
946 dccp_inc_seqno(&tmp_seqno); 960 dccp_inc_seqno(&tmp_seqno);
947 } 961 }
@@ -967,7 +981,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
967 const struct dccp_options_received *opt_recv; 981 const struct dccp_options_received *opt_recv;
968 struct dccp_rx_hist_entry *packet; 982 struct dccp_rx_hist_entry *packet;
969 struct timeval now; 983 struct timeval now;
970 u32 p_prev, rtt_prev, r_sample, t_elapsed; 984 u32 p_prev, rtt_prev;
985 suseconds_t r_sample, t_elapsed;
971 int loss, payload_size; 986 int loss, payload_size;
972 987
973 BUG_ON(hcrx == NULL); 988 BUG_ON(hcrx == NULL);
@@ -987,11 +1002,13 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
987 r_sample = timeval_usecs(&now); 1002 r_sample = timeval_usecs(&now);
988 t_elapsed = opt_recv->dccpor_elapsed_time * 10; 1003 t_elapsed = opt_recv->dccpor_elapsed_time * 10;
989 1004
1005 DCCP_BUG_ON(r_sample < 0);
990 if (unlikely(r_sample <= t_elapsed)) 1006 if (unlikely(r_sample <= t_elapsed))
991 DCCP_WARN("r_sample=%uus, t_elapsed=%uus\n", 1007 DCCP_WARN("r_sample=%ldus, t_elapsed=%ldus\n",
992 r_sample, t_elapsed); 1008 r_sample, t_elapsed);
993 else 1009 else
994 r_sample -= t_elapsed; 1010 r_sample -= t_elapsed;
1011 CCID3_RTT_SANITY_CHECK(r_sample);
995 1012
996 if (hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA) 1013 if (hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA)
997 hcrx->ccid3hcrx_rtt = r_sample; 1014 hcrx->ccid3hcrx_rtt = r_sample;
@@ -1000,8 +1017,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
1000 r_sample / 10; 1017 r_sample / 10;
1001 1018
1002 if (rtt_prev != hcrx->ccid3hcrx_rtt) 1019 if (rtt_prev != hcrx->ccid3hcrx_rtt)
1003 ccid3_pr_debug("%s, New RTT=%uus, elapsed time=%u\n", 1020 ccid3_pr_debug("%s(%p), New RTT=%uus, elapsed time=%u\n",
1004 dccp_role(sk), hcrx->ccid3hcrx_rtt, 1021 dccp_role(sk), sk, hcrx->ccid3hcrx_rtt,
1005 opt_recv->dccpor_elapsed_time); 1022 opt_recv->dccpor_elapsed_time);
1006 break; 1023 break;
1007 case DCCP_PKT_DATA: 1024 case DCCP_PKT_DATA:
@@ -1013,7 +1030,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
1013 packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp, 1030 packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp,
1014 skb, GFP_ATOMIC); 1031 skb, GFP_ATOMIC);
1015 if (unlikely(packet == NULL)) { 1032 if (unlikely(packet == NULL)) {
1016 DCCP_WARN("%s, sk=%p, Not enough mem to add rx packet " 1033 DCCP_WARN("%s(%p), Not enough mem to add rx packet "
1017 "to history, consider it lost!\n", dccp_role(sk), sk); 1034 "to history, consider it lost!\n", dccp_role(sk), sk);
1018 return; 1035 return;
1019 } 1036 }
@@ -1028,9 +1045,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
1028 1045
1029 switch (hcrx->ccid3hcrx_state) { 1046 switch (hcrx->ccid3hcrx_state) {
1030 case TFRC_RSTATE_NO_DATA: 1047 case TFRC_RSTATE_NO_DATA:
1031 ccid3_pr_debug("%s, sk=%p(%s), skb=%p, sending initial " 1048 ccid3_pr_debug("%s(%p, state=%s), skb=%p, sending initial "
1032 "feedback\n", 1049 "feedback\n", dccp_role(sk), sk,
1033 dccp_role(sk), sk,
1034 dccp_state_name(sk->sk_state), skb); 1050 dccp_state_name(sk->sk_state), skb);
1035 ccid3_hc_rx_send_feedback(sk); 1051 ccid3_hc_rx_send_feedback(sk);
1036 ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA); 1052 ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA);
@@ -1041,19 +1057,19 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
1041 break; 1057 break;
1042 1058
1043 dccp_timestamp(sk, &now); 1059 dccp_timestamp(sk, &now);
1044 if (timeval_delta(&now, &hcrx->ccid3hcrx_tstamp_last_ack) >= 1060 if ((timeval_delta(&now, &hcrx->ccid3hcrx_tstamp_last_ack) -
1045 hcrx->ccid3hcrx_rtt) { 1061 (suseconds_t)hcrx->ccid3hcrx_rtt) >= 0) {
1046 hcrx->ccid3hcrx_tstamp_last_ack = now; 1062 hcrx->ccid3hcrx_tstamp_last_ack = now;
1047 ccid3_hc_rx_send_feedback(sk); 1063 ccid3_hc_rx_send_feedback(sk);
1048 } 1064 }
1049 return; 1065 return;
1050 case TFRC_RSTATE_TERM: 1066 case TFRC_RSTATE_TERM:
1051 DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk); 1067 DCCP_BUG("%s(%p) - Illegal state TERM", dccp_role(sk), sk);
1052 return; 1068 return;
1053 } 1069 }
1054 1070
1055 /* Dealing with packet loss */ 1071 /* Dealing with packet loss */
1056 ccid3_pr_debug("%s, sk=%p(%s), data loss! Reacting...\n", 1072 ccid3_pr_debug("%s(%p, state=%s), data loss! Reacting...\n",
1057 dccp_role(sk), sk, dccp_state_name(sk->sk_state)); 1073 dccp_role(sk), sk, dccp_state_name(sk->sk_state));
1058 1074
1059 p_prev = hcrx->ccid3hcrx_p; 1075 p_prev = hcrx->ccid3hcrx_p;
@@ -1078,7 +1094,7 @@ static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk)
1078{ 1094{
1079 struct ccid3_hc_rx_sock *hcrx = ccid_priv(ccid); 1095 struct ccid3_hc_rx_sock *hcrx = ccid_priv(ccid);
1080 1096
1081 ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk); 1097 ccid3_pr_debug("entry\n");
1082 1098
1083 hcrx->ccid3hcrx_state = TFRC_RSTATE_NO_DATA; 1099 hcrx->ccid3hcrx_state = TFRC_RSTATE_NO_DATA;
1084 INIT_LIST_HEAD(&hcrx->ccid3hcrx_hist); 1100 INIT_LIST_HEAD(&hcrx->ccid3hcrx_hist);
@@ -1086,7 +1102,7 @@ static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk)
1086 dccp_timestamp(sk, &hcrx->ccid3hcrx_tstamp_last_ack); 1102 dccp_timestamp(sk, &hcrx->ccid3hcrx_tstamp_last_ack);
1087 hcrx->ccid3hcrx_tstamp_last_feedback = hcrx->ccid3hcrx_tstamp_last_ack; 1103 hcrx->ccid3hcrx_tstamp_last_feedback = hcrx->ccid3hcrx_tstamp_last_ack;
1088 hcrx->ccid3hcrx_s = 0; 1104 hcrx->ccid3hcrx_s = 0;
1089 hcrx->ccid3hcrx_rtt = 5000; /* XXX 5ms for now... */ 1105 hcrx->ccid3hcrx_rtt = 0;
1090 return 0; 1106 return 0;
1091} 1107}
1092 1108
@@ -1115,9 +1131,9 @@ static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info)
1115 1131
1116 BUG_ON(hcrx == NULL); 1132 BUG_ON(hcrx == NULL);
1117 1133
1118 info->tcpi_ca_state = hcrx->ccid3hcrx_state; 1134 info->tcpi_ca_state = hcrx->ccid3hcrx_state;
1119 info->tcpi_options |= TCPI_OPT_TIMESTAMPS; 1135 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
1120 info->tcpi_rcv_rtt = hcrx->ccid3hcrx_rtt; 1136 info->tcpi_rcv_rtt = hcrx->ccid3hcrx_rtt;
1121} 1137}
1122 1138
1123static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info) 1139static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info)
@@ -1198,7 +1214,6 @@ static struct ccid_operations ccid3 = {
1198 .ccid_hc_tx_send_packet = ccid3_hc_tx_send_packet, 1214 .ccid_hc_tx_send_packet = ccid3_hc_tx_send_packet,
1199 .ccid_hc_tx_packet_sent = ccid3_hc_tx_packet_sent, 1215 .ccid_hc_tx_packet_sent = ccid3_hc_tx_packet_sent,
1200 .ccid_hc_tx_packet_recv = ccid3_hc_tx_packet_recv, 1216 .ccid_hc_tx_packet_recv = ccid3_hc_tx_packet_recv,
1201 .ccid_hc_tx_insert_options = ccid3_hc_tx_insert_options,
1202 .ccid_hc_tx_parse_options = ccid3_hc_tx_parse_options, 1217 .ccid_hc_tx_parse_options = ccid3_hc_tx_parse_options,
1203 .ccid_hc_rx_obj_size = sizeof(struct ccid3_hc_rx_sock), 1218 .ccid_hc_rx_obj_size = sizeof(struct ccid3_hc_rx_sock),
1204 .ccid_hc_rx_init = ccid3_hc_rx_init, 1219 .ccid_hc_rx_init = ccid3_hc_rx_init,
@@ -1210,7 +1225,7 @@ static struct ccid_operations ccid3 = {
1210 .ccid_hc_rx_getsockopt = ccid3_hc_rx_getsockopt, 1225 .ccid_hc_rx_getsockopt = ccid3_hc_rx_getsockopt,
1211 .ccid_hc_tx_getsockopt = ccid3_hc_tx_getsockopt, 1226 .ccid_hc_tx_getsockopt = ccid3_hc_tx_getsockopt,
1212}; 1227};
1213 1228
1214#ifdef CONFIG_IP_DCCP_CCID3_DEBUG 1229#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
1215module_param(ccid3_debug, int, 0444); 1230module_param(ccid3_debug, int, 0444);
1216MODULE_PARM_DESC(ccid3_debug, "Enable debug messages"); 1231MODULE_PARM_DESC(ccid3_debug, "Enable debug messages");
@@ -1233,7 +1248,7 @@ static __init int ccid3_module_init(void)
1233 goto out_free_tx; 1248 goto out_free_tx;
1234 1249
1235 rc = ccid_register(&ccid3); 1250 rc = ccid_register(&ccid3);
1236 if (rc != 0) 1251 if (rc != 0)
1237 goto out_free_loss_interval_history; 1252 goto out_free_loss_interval_history;
1238out: 1253out:
1239 return rc; 1254 return rc;
diff --git a/net/dccp/ccids/ccid3.h b/net/dccp/ccids/ccid3.h
index 07596d704ef..15776a88c09 100644
--- a/net/dccp/ccids/ccid3.h
+++ b/net/dccp/ccids/ccid3.h
@@ -51,6 +51,16 @@
51/* Parameter t_mbi from [RFC 3448, 4.3]: backoff interval in seconds */ 51/* Parameter t_mbi from [RFC 3448, 4.3]: backoff interval in seconds */
52#define TFRC_T_MBI 64 52#define TFRC_T_MBI 64
53 53
54/* What we think is a reasonable upper limit on RTT values */
55#define CCID3_SANE_RTT_MAX ((suseconds_t)(4 * USEC_PER_SEC))
56
57#define CCID3_RTT_SANITY_CHECK(rtt) do { \
58 if (rtt > CCID3_SANE_RTT_MAX) { \
59 DCCP_CRIT("RTT (%d) too large, substituting %d", \
60 (int)rtt, (int)CCID3_SANE_RTT_MAX); \
61 rtt = CCID3_SANE_RTT_MAX; \
62 } } while (0)
63
54enum ccid3_options { 64enum ccid3_options {
55 TFRC_OPT_LOSS_EVENT_RATE = 192, 65 TFRC_OPT_LOSS_EVENT_RATE = 192,
56 TFRC_OPT_LOSS_INTERVALS = 193, 66 TFRC_OPT_LOSS_INTERVALS = 193,
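
The CCID3_RTT_SANITY_CHECK macro added in this hunk only logs and clamps implausible samples. A compact userspace sketch of the same clamping, with DCCP_CRIT replaced by a plain fprintf stand-in:

    #include <stdio.h>

    #define SANE_RTT_MAX (4 * 1000000L)        /* 4 seconds, in microseconds */

    /* Same shape as the kernel macro; DCCP_CRIT() becomes fprintf() here. */
    #define RTT_SANITY_CHECK(rtt) do {                                      \
            if ((rtt) > SANE_RTT_MAX) {                                     \
                    fprintf(stderr, "RTT (%ld) too large\n", (long)(rtt));  \
                    (rtt) = SANE_RTT_MAX;                                   \
            } } while (0)

    int main(void)
    {
        long rtt = 9 * 1000000L;            /* implausible 9 s sample */

        RTT_SANITY_CHECK(rtt);
        printf("rtt = %ld us\n", rtt);      /* prints 4000000 */
        return 0;
    }
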
@@ -67,7 +77,7 @@ struct ccid3_options_received {
67 77
68/* TFRC sender states */ 78/* TFRC sender states */
69enum ccid3_hc_tx_states { 79enum ccid3_hc_tx_states {
70 TFRC_SSTATE_NO_SENT = 1, 80 TFRC_SSTATE_NO_SENT = 1,
71 TFRC_SSTATE_NO_FBACK, 81 TFRC_SSTATE_NO_FBACK,
72 TFRC_SSTATE_FBACK, 82 TFRC_SSTATE_FBACK,
73 TFRC_SSTATE_TERM, 83 TFRC_SSTATE_TERM,
@@ -75,23 +85,23 @@ enum ccid3_hc_tx_states {
75 85
76/** struct ccid3_hc_tx_sock - CCID3 sender half-connection socket 86/** struct ccid3_hc_tx_sock - CCID3 sender half-connection socket
77 * 87 *
78 * @ccid3hctx_x - Current sending rate 88 * @ccid3hctx_x - Current sending rate in 64 * bytes per second
79 * @ccid3hctx_x_recv - Receive rate 89 * @ccid3hctx_x_recv - Receive rate in 64 * bytes per second
80 * @ccid3hctx_x_calc - Calculated send rate (RFC 3448, 3.1) 90 * @ccid3hctx_x_calc - Calculated rate in bytes per second
81 * @ccid3hctx_rtt - Estimate of current round trip time in usecs 91 * @ccid3hctx_rtt - Estimate of current round trip time in usecs
82 * @ccid3hctx_p - Current loss event rate (0-1) scaled by 1000000 92 * @ccid3hctx_p - Current loss event rate (0-1) scaled by 1000000
83 * @ccid3hctx_s - Packet size 93 * @ccid3hctx_s - Packet size in bytes
84 * @ccid3hctx_t_rto - Retransmission Timeout (RFC 3448, 3.1) 94 * @ccid3hctx_t_rto - Nofeedback Timer setting in usecs
85 * @ccid3hctx_t_ipi - Interpacket (send) interval (RFC 3448, 4.6) 95 * @ccid3hctx_t_ipi - Interpacket (send) interval (RFC 3448, 4.6) in usecs
86 * @ccid3hctx_state - Sender state, one of %ccid3_hc_tx_states 96 * @ccid3hctx_state - Sender state, one of %ccid3_hc_tx_states
87 * @ccid3hctx_last_win_count - Last window counter sent 97 * @ccid3hctx_last_win_count - Last window counter sent
88 * @ccid3hctx_t_last_win_count - Timestamp of earliest packet 98 * @ccid3hctx_t_last_win_count - Timestamp of earliest packet
89 * with last_win_count value sent 99 * with last_win_count value sent
90 * @ccid3hctx_no_feedback_timer - Handle to no feedback timer 100 * @ccid3hctx_no_feedback_timer - Handle to no feedback timer
91 * @ccid3hctx_idle - Flag indicating that sender is idling 101 * @ccid3hctx_idle - Flag indicating that sender is idling
92 * @ccid3hctx_t_ld - Time last doubled during slow start 102 * @ccid3hctx_t_ld - Time last doubled during slow start
93 * @ccid3hctx_t_nom - Nominal send time of next packet 103 * @ccid3hctx_t_nom - Nominal send time of next packet
94 * @ccid3hctx_delta - Send timer delta 104 * @ccid3hctx_delta - Send timer delta (RFC 3448, 4.6) in usecs
95 * @ccid3hctx_hist - Packet history 105 * @ccid3hctx_hist - Packet history
96 * @ccid3hctx_options_received - Parsed set of retrieved options 106 * @ccid3hctx_options_received - Parsed set of retrieved options
97 */ 107 */
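
The updated kernel-doc above records that ccid3hctx_x and ccid3hctx_x_recv are now kept as 64 * bytes per second. A small sketch (illustrative only, not the kernel's code) of how such a fixed-point rate maps to the inter-packet interval t_ipi = s/X of RFC 3448, 4.6:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t s        = 1460;               /* packet size, bytes           */
        uint64_t x_scaled = 125000ULL << 6;     /* 125000 bytes/s as 64*bytes/s */

        /* t_ipi = s / X, in microseconds, with X stored as 64 * bytes/s */
        uint64_t t_ipi = (uint64_t)s * 64 * 1000000 / x_scaled;

        printf("X = %llu bytes/s  =>  t_ipi = %llu us\n",
               (unsigned long long)(x_scaled >> 6),
               (unsigned long long)t_ipi);      /* 125000 bytes/s => 11680 us */
        return 0;
    }
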
@@ -105,7 +115,7 @@ struct ccid3_hc_tx_sock {
105#define ccid3hctx_t_rto ccid3hctx_tfrc.tfrctx_rto 115#define ccid3hctx_t_rto ccid3hctx_tfrc.tfrctx_rto
106#define ccid3hctx_t_ipi ccid3hctx_tfrc.tfrctx_ipi 116#define ccid3hctx_t_ipi ccid3hctx_tfrc.tfrctx_ipi
107 u16 ccid3hctx_s; 117 u16 ccid3hctx_s;
108 enum ccid3_hc_tx_states ccid3hctx_state:8; 118 enum ccid3_hc_tx_states ccid3hctx_state:8;
109 u8 ccid3hctx_last_win_count; 119 u8 ccid3hctx_last_win_count;
110 u8 ccid3hctx_idle; 120 u8 ccid3hctx_idle;
111 struct timeval ccid3hctx_t_last_win_count; 121 struct timeval ccid3hctx_t_last_win_count;
@@ -119,7 +129,7 @@ struct ccid3_hc_tx_sock {
119 129
120/* TFRC receiver states */ 130/* TFRC receiver states */
121enum ccid3_hc_rx_states { 131enum ccid3_hc_rx_states {
122 TFRC_RSTATE_NO_DATA = 1, 132 TFRC_RSTATE_NO_DATA = 1,
123 TFRC_RSTATE_DATA, 133 TFRC_RSTATE_DATA,
124 TFRC_RSTATE_TERM = 127, 134 TFRC_RSTATE_TERM = 127,
125}; 135};
@@ -147,18 +157,18 @@ struct ccid3_hc_rx_sock {
147#define ccid3hcrx_x_recv ccid3hcrx_tfrc.tfrcrx_x_recv 157#define ccid3hcrx_x_recv ccid3hcrx_tfrc.tfrcrx_x_recv
148#define ccid3hcrx_rtt ccid3hcrx_tfrc.tfrcrx_rtt 158#define ccid3hcrx_rtt ccid3hcrx_tfrc.tfrcrx_rtt
149#define ccid3hcrx_p ccid3hcrx_tfrc.tfrcrx_p 159#define ccid3hcrx_p ccid3hcrx_tfrc.tfrcrx_p
150 u64 ccid3hcrx_seqno_nonloss:48, 160 u64 ccid3hcrx_seqno_nonloss:48,
151 ccid3hcrx_ccval_nonloss:4, 161 ccid3hcrx_ccval_nonloss:4,
152 ccid3hcrx_ccval_last_counter:4; 162 ccid3hcrx_ccval_last_counter:4;
153 enum ccid3_hc_rx_states ccid3hcrx_state:8; 163 enum ccid3_hc_rx_states ccid3hcrx_state:8;
154 u32 ccid3hcrx_bytes_recv; 164 u32 ccid3hcrx_bytes_recv;
155 struct timeval ccid3hcrx_tstamp_last_feedback; 165 struct timeval ccid3hcrx_tstamp_last_feedback;
156 struct timeval ccid3hcrx_tstamp_last_ack; 166 struct timeval ccid3hcrx_tstamp_last_ack;
157 struct list_head ccid3hcrx_hist; 167 struct list_head ccid3hcrx_hist;
158 struct list_head ccid3hcrx_li_hist; 168 struct list_head ccid3hcrx_li_hist;
159 u16 ccid3hcrx_s; 169 u16 ccid3hcrx_s;
160 u32 ccid3hcrx_pinv; 170 u32 ccid3hcrx_pinv;
161 u32 ccid3hcrx_elapsed_time; 171 u32 ccid3hcrx_elapsed_time;
162}; 172};
163 173
164static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk) 174static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk)
diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c
index b876c9c81c6..2e8ef42721e 100644
--- a/net/dccp/ccids/lib/packet_history.c
+++ b/net/dccp/ccids/lib/packet_history.c
@@ -36,9 +36,100 @@
36 36
37#include <linux/module.h> 37#include <linux/module.h>
38#include <linux/string.h> 38#include <linux/string.h>
39
40#include "packet_history.h" 39#include "packet_history.h"
41 40
41/*
42 * Transmitter History Routines
43 */
44struct dccp_tx_hist *dccp_tx_hist_new(const char *name)
45{
46 struct dccp_tx_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC);
47 static const char dccp_tx_hist_mask[] = "tx_hist_%s";
48 char *slab_name;
49
50 if (hist == NULL)
51 goto out;
52
53 slab_name = kmalloc(strlen(name) + sizeof(dccp_tx_hist_mask) - 1,
54 GFP_ATOMIC);
55 if (slab_name == NULL)
56 goto out_free_hist;
57
58 sprintf(slab_name, dccp_tx_hist_mask, name);
59 hist->dccptxh_slab = kmem_cache_create(slab_name,
60 sizeof(struct dccp_tx_hist_entry),
61 0, SLAB_HWCACHE_ALIGN,
62 NULL, NULL);
63 if (hist->dccptxh_slab == NULL)
64 goto out_free_slab_name;
65out:
66 return hist;
67out_free_slab_name:
68 kfree(slab_name);
69out_free_hist:
70 kfree(hist);
71 hist = NULL;
72 goto out;
73}
74
75EXPORT_SYMBOL_GPL(dccp_tx_hist_new);
76
77void dccp_tx_hist_delete(struct dccp_tx_hist *hist)
78{
79 const char* name = kmem_cache_name(hist->dccptxh_slab);
80
81 kmem_cache_destroy(hist->dccptxh_slab);
82 kfree(name);
83 kfree(hist);
84}
85
86EXPORT_SYMBOL_GPL(dccp_tx_hist_delete);
87
88struct dccp_tx_hist_entry *
89 dccp_tx_hist_find_entry(const struct list_head *list, const u64 seq)
90{
91 struct dccp_tx_hist_entry *packet = NULL, *entry;
92
93 list_for_each_entry(entry, list, dccphtx_node)
94 if (entry->dccphtx_seqno == seq) {
95 packet = entry;
96 break;
97 }
98
99 return packet;
100}
101
102EXPORT_SYMBOL_GPL(dccp_tx_hist_find_entry);
103
104void dccp_tx_hist_purge(struct dccp_tx_hist *hist, struct list_head *list)
105{
106 struct dccp_tx_hist_entry *entry, *next;
107
108 list_for_each_entry_safe(entry, next, list, dccphtx_node) {
109 list_del_init(&entry->dccphtx_node);
110 dccp_tx_hist_entry_delete(hist, entry);
111 }
112}
113
114EXPORT_SYMBOL_GPL(dccp_tx_hist_purge);
115
116void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist,
117 struct list_head *list,
118 struct dccp_tx_hist_entry *packet)
119{
120 struct dccp_tx_hist_entry *next;
121
122 list_for_each_entry_safe_continue(packet, next, list, dccphtx_node) {
123 list_del_init(&packet->dccphtx_node);
124 dccp_tx_hist_entry_delete(hist, packet);
125 }
126}
127
128EXPORT_SYMBOL_GPL(dccp_tx_hist_purge_older);
129
130/*
131 * Receiver History Routines
132 */
42struct dccp_rx_hist *dccp_rx_hist_new(const char *name) 133struct dccp_rx_hist *dccp_rx_hist_new(const char *name)
43{ 134{
44 struct dccp_rx_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC); 135 struct dccp_rx_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC);
@@ -83,18 +174,24 @@ void dccp_rx_hist_delete(struct dccp_rx_hist *hist)
83 174
84EXPORT_SYMBOL_GPL(dccp_rx_hist_delete); 175EXPORT_SYMBOL_GPL(dccp_rx_hist_delete);
85 176
86void dccp_rx_hist_purge(struct dccp_rx_hist *hist, struct list_head *list) 177int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
178 u8 *ccval)
87{ 179{
88 struct dccp_rx_hist_entry *entry, *next; 180 struct dccp_rx_hist_entry *packet = NULL, *entry;
89 181
90 list_for_each_entry_safe(entry, next, list, dccphrx_node) { 182 list_for_each_entry(entry, list, dccphrx_node)
91 list_del_init(&entry->dccphrx_node); 183 if (entry->dccphrx_seqno == seq) {
92 kmem_cache_free(hist->dccprxh_slab, entry); 184 packet = entry;
93 } 185 break;
94} 186 }
95 187
96EXPORT_SYMBOL_GPL(dccp_rx_hist_purge); 188 if (packet)
189 *ccval = packet->dccphrx_ccval;
97 190
191 return packet != NULL;
192}
193
194EXPORT_SYMBOL_GPL(dccp_rx_hist_find_entry);
98struct dccp_rx_hist_entry * 195struct dccp_rx_hist_entry *
99 dccp_rx_hist_find_data_packet(const struct list_head *list) 196 dccp_rx_hist_find_data_packet(const struct list_head *list)
100{ 197{
@@ -184,110 +281,18 @@ void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,
184 281
185EXPORT_SYMBOL_GPL(dccp_rx_hist_add_packet); 282EXPORT_SYMBOL_GPL(dccp_rx_hist_add_packet);
186 283
187struct dccp_tx_hist *dccp_tx_hist_new(const char *name) 284void dccp_rx_hist_purge(struct dccp_rx_hist *hist, struct list_head *list)
188{
189 struct dccp_tx_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC);
190 static const char dccp_tx_hist_mask[] = "tx_hist_%s";
191 char *slab_name;
192
193 if (hist == NULL)
194 goto out;
195
196 slab_name = kmalloc(strlen(name) + sizeof(dccp_tx_hist_mask) - 1,
197 GFP_ATOMIC);
198 if (slab_name == NULL)
199 goto out_free_hist;
200
201 sprintf(slab_name, dccp_tx_hist_mask, name);
202 hist->dccptxh_slab = kmem_cache_create(slab_name,
203 sizeof(struct dccp_tx_hist_entry),
204 0, SLAB_HWCACHE_ALIGN,
205 NULL, NULL);
206 if (hist->dccptxh_slab == NULL)
207 goto out_free_slab_name;
208out:
209 return hist;
210out_free_slab_name:
211 kfree(slab_name);
212out_free_hist:
213 kfree(hist);
214 hist = NULL;
215 goto out;
216}
217
218EXPORT_SYMBOL_GPL(dccp_tx_hist_new);
219
220void dccp_tx_hist_delete(struct dccp_tx_hist *hist)
221{
222 const char* name = kmem_cache_name(hist->dccptxh_slab);
223
224 kmem_cache_destroy(hist->dccptxh_slab);
225 kfree(name);
226 kfree(hist);
227}
228
229EXPORT_SYMBOL_GPL(dccp_tx_hist_delete);
230
231struct dccp_tx_hist_entry *
232 dccp_tx_hist_find_entry(const struct list_head *list, const u64 seq)
233{
234 struct dccp_tx_hist_entry *packet = NULL, *entry;
235
236 list_for_each_entry(entry, list, dccphtx_node)
237 if (entry->dccphtx_seqno == seq) {
238 packet = entry;
239 break;
240 }
241
242 return packet;
243}
244
245EXPORT_SYMBOL_GPL(dccp_tx_hist_find_entry);
246
247int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
248 u8 *ccval)
249{
250 struct dccp_rx_hist_entry *packet = NULL, *entry;
251
252 list_for_each_entry(entry, list, dccphrx_node)
253 if (entry->dccphrx_seqno == seq) {
254 packet = entry;
255 break;
256 }
257
258 if (packet)
259 *ccval = packet->dccphrx_ccval;
260
261 return packet != NULL;
262}
263
264EXPORT_SYMBOL_GPL(dccp_rx_hist_find_entry);
265
266void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist,
267 struct list_head *list,
268 struct dccp_tx_hist_entry *packet)
269{ 285{
270 struct dccp_tx_hist_entry *next; 286 struct dccp_rx_hist_entry *entry, *next;
271 287
272 list_for_each_entry_safe_continue(packet, next, list, dccphtx_node) { 288 list_for_each_entry_safe(entry, next, list, dccphrx_node) {
273 list_del_init(&packet->dccphtx_node); 289 list_del_init(&entry->dccphrx_node);
274 dccp_tx_hist_entry_delete(hist, packet); 290 kmem_cache_free(hist->dccprxh_slab, entry);
275 } 291 }
276} 292}
277 293
278EXPORT_SYMBOL_GPL(dccp_tx_hist_purge_older); 294EXPORT_SYMBOL_GPL(dccp_rx_hist_purge);
279
280void dccp_tx_hist_purge(struct dccp_tx_hist *hist, struct list_head *list)
281{
282 struct dccp_tx_hist_entry *entry, *next;
283
284 list_for_each_entry_safe(entry, next, list, dccphtx_node) {
285 list_del_init(&entry->dccphtx_node);
286 dccp_tx_hist_entry_delete(hist, entry);
287 }
288}
289 295
290EXPORT_SYMBOL_GPL(dccp_tx_hist_purge);
291 296
292MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>, " 297MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>, "
293 "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>"); 298 "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>");
diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h
index 9a8bcf224aa..1f960c19ea1 100644
--- a/net/dccp/ccids/lib/packet_history.h
+++ b/net/dccp/ccids/lib/packet_history.h
@@ -49,43 +49,27 @@
49#define TFRC_WIN_COUNT_PER_RTT 4 49#define TFRC_WIN_COUNT_PER_RTT 4
50#define TFRC_WIN_COUNT_LIMIT 16 50#define TFRC_WIN_COUNT_LIMIT 16
51 51
52/*
53 * Transmitter History data structures and declarations
54 */
52struct dccp_tx_hist_entry { 55struct dccp_tx_hist_entry {
53 struct list_head dccphtx_node; 56 struct list_head dccphtx_node;
54 u64 dccphtx_seqno:48, 57 u64 dccphtx_seqno:48,
55 dccphtx_ccval:4,
56 dccphtx_sent:1; 58 dccphtx_sent:1;
57 u32 dccphtx_rtt; 59 u32 dccphtx_rtt;
58 struct timeval dccphtx_tstamp; 60 struct timeval dccphtx_tstamp;
59}; 61};
60 62
61struct dccp_rx_hist_entry {
62 struct list_head dccphrx_node;
63 u64 dccphrx_seqno:48,
64 dccphrx_ccval:4,
65 dccphrx_type:4;
66 u32 dccphrx_ndp; /* In fact it is from 8 to 24 bits */
67 struct timeval dccphrx_tstamp;
68};
69
70struct dccp_tx_hist { 63struct dccp_tx_hist {
71 struct kmem_cache *dccptxh_slab; 64 struct kmem_cache *dccptxh_slab;
72}; 65};
73 66
74extern struct dccp_tx_hist *dccp_tx_hist_new(const char *name); 67extern struct dccp_tx_hist *dccp_tx_hist_new(const char *name);
75extern void dccp_tx_hist_delete(struct dccp_tx_hist *hist); 68extern void dccp_tx_hist_delete(struct dccp_tx_hist *hist);
76
77struct dccp_rx_hist {
78 struct kmem_cache *dccprxh_slab;
79};
80
81extern struct dccp_rx_hist *dccp_rx_hist_new(const char *name);
82extern void dccp_rx_hist_delete(struct dccp_rx_hist *hist);
83extern struct dccp_rx_hist_entry *
84 dccp_rx_hist_find_data_packet(const struct list_head *list);
85 69
86static inline struct dccp_tx_hist_entry * 70static inline struct dccp_tx_hist_entry *
87 dccp_tx_hist_entry_new(struct dccp_tx_hist *hist, 71 dccp_tx_hist_entry_new(struct dccp_tx_hist *hist,
88 const gfp_t prio) 72 const gfp_t prio)
89{ 73{
90 struct dccp_tx_hist_entry *entry = kmem_cache_alloc(hist->dccptxh_slab, 74 struct dccp_tx_hist_entry *entry = kmem_cache_alloc(hist->dccptxh_slab,
91 prio); 75 prio);
@@ -96,18 +80,20 @@ static inline struct dccp_tx_hist_entry *
96 return entry; 80 return entry;
97} 81}
98 82
99static inline void dccp_tx_hist_entry_delete(struct dccp_tx_hist *hist, 83static inline struct dccp_tx_hist_entry *
100 struct dccp_tx_hist_entry *entry) 84 dccp_tx_hist_head(struct list_head *list)
101{ 85{
102 if (entry != NULL) 86 struct dccp_tx_hist_entry *head = NULL;
103 kmem_cache_free(hist->dccptxh_slab, entry); 87
88 if (!list_empty(list))
89 head = list_entry(list->next, struct dccp_tx_hist_entry,
90 dccphtx_node);
91 return head;
104} 92}
105 93
106extern struct dccp_tx_hist_entry * 94extern struct dccp_tx_hist_entry *
107 dccp_tx_hist_find_entry(const struct list_head *list, 95 dccp_tx_hist_find_entry(const struct list_head *list,
108 const u64 seq); 96 const u64 seq);
109extern int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
110 u8 *ccval);
111 97
112static inline void dccp_tx_hist_add_entry(struct list_head *list, 98static inline void dccp_tx_hist_add_entry(struct list_head *list,
113 struct dccp_tx_hist_entry *entry) 99 struct dccp_tx_hist_entry *entry)
@@ -115,30 +101,45 @@ static inline void dccp_tx_hist_add_entry(struct list_head *list,
115 list_add(&entry->dccphtx_node, list); 101 list_add(&entry->dccphtx_node, list);
116} 102}
117 103
104static inline void dccp_tx_hist_entry_delete(struct dccp_tx_hist *hist,
105 struct dccp_tx_hist_entry *entry)
106{
107 if (entry != NULL)
108 kmem_cache_free(hist->dccptxh_slab, entry);
109}
110
111extern void dccp_tx_hist_purge(struct dccp_tx_hist *hist,
112 struct list_head *list);
113
118extern void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist, 114extern void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist,
119 struct list_head *list, 115 struct list_head *list,
120 struct dccp_tx_hist_entry *next); 116 struct dccp_tx_hist_entry *next);
121 117
122extern void dccp_tx_hist_purge(struct dccp_tx_hist *hist, 118/*
123 struct list_head *list); 119 * Receiver History data structures and declarations
120 */
121struct dccp_rx_hist_entry {
122 struct list_head dccphrx_node;
123 u64 dccphrx_seqno:48,
124 dccphrx_ccval:4,
125 dccphrx_type:4;
126 u32 dccphrx_ndp; /* In fact it is from 8 to 24 bits */
127 struct timeval dccphrx_tstamp;
128};
124 129
125static inline struct dccp_tx_hist_entry * 130struct dccp_rx_hist {
126 dccp_tx_hist_head(struct list_head *list) 131 struct kmem_cache *dccprxh_slab;
127{ 132};
128 struct dccp_tx_hist_entry *head = NULL;
129 133
130 if (!list_empty(list)) 134extern struct dccp_rx_hist *dccp_rx_hist_new(const char *name);
131 head = list_entry(list->next, struct dccp_tx_hist_entry, 135extern void dccp_rx_hist_delete(struct dccp_rx_hist *hist);
132 dccphtx_node);
133 return head;
134}
135 136
136static inline struct dccp_rx_hist_entry * 137static inline struct dccp_rx_hist_entry *
137 dccp_rx_hist_entry_new(struct dccp_rx_hist *hist, 138 dccp_rx_hist_entry_new(struct dccp_rx_hist *hist,
138 const struct sock *sk, 139 const struct sock *sk,
139 const u32 ndp, 140 const u32 ndp,
140 const struct sk_buff *skb, 141 const struct sk_buff *skb,
141 const gfp_t prio) 142 const gfp_t prio)
142{ 143{
143 struct dccp_rx_hist_entry *entry = kmem_cache_alloc(hist->dccprxh_slab, 144 struct dccp_rx_hist_entry *entry = kmem_cache_alloc(hist->dccprxh_slab,
144 prio); 145 prio);
@@ -156,18 +157,8 @@ static inline struct dccp_rx_hist_entry *
156 return entry; 157 return entry;
157} 158}
158 159
159static inline void dccp_rx_hist_entry_delete(struct dccp_rx_hist *hist,
160 struct dccp_rx_hist_entry *entry)
161{
162 if (entry != NULL)
163 kmem_cache_free(hist->dccprxh_slab, entry);
164}
165
166extern void dccp_rx_hist_purge(struct dccp_rx_hist *hist,
167 struct list_head *list);
168
169static inline struct dccp_rx_hist_entry * 160static inline struct dccp_rx_hist_entry *
170 dccp_rx_hist_head(struct list_head *list) 161 dccp_rx_hist_head(struct list_head *list)
171{ 162{
172 struct dccp_rx_hist_entry *head = NULL; 163 struct dccp_rx_hist_entry *head = NULL;
173 164
@@ -177,6 +168,27 @@ static inline struct dccp_rx_hist_entry *
177 return head; 168 return head;
178} 169}
179 170
171extern int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
172 u8 *ccval);
173extern struct dccp_rx_hist_entry *
174 dccp_rx_hist_find_data_packet(const struct list_head *list);
175
176extern void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,
177 struct list_head *rx_list,
178 struct list_head *li_list,
179 struct dccp_rx_hist_entry *packet,
180 u64 nonloss_seqno);
181
182static inline void dccp_rx_hist_entry_delete(struct dccp_rx_hist *hist,
183 struct dccp_rx_hist_entry *entry)
184{
185 if (entry != NULL)
186 kmem_cache_free(hist->dccprxh_slab, entry);
187}
188
189extern void dccp_rx_hist_purge(struct dccp_rx_hist *hist,
190 struct list_head *list);
191
180static inline int 192static inline int
181 dccp_rx_hist_entry_data_packet(const struct dccp_rx_hist_entry *entry) 193 dccp_rx_hist_entry_data_packet(const struct dccp_rx_hist_entry *entry)
182{ 194{
@@ -184,12 +196,6 @@ static inline int
184 entry->dccphrx_type == DCCP_PKT_DATAACK; 196 entry->dccphrx_type == DCCP_PKT_DATAACK;
185} 197}
186 198
187extern void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,
188 struct list_head *rx_list,
189 struct list_head *li_list,
190 struct dccp_rx_hist_entry *packet,
191 u64 nonloss_seqno);
192
193extern u64 dccp_rx_hist_detect_loss(struct list_head *rx_list, 199extern u64 dccp_rx_hist_detect_loss(struct list_head *rx_list,
194 struct list_head *li_list, u8 *win_loss); 200 struct list_head *li_list, u8 *win_loss);
195 201
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h
index 45f30f59ea2..faf5f7e219e 100644
--- a/net/dccp/ccids/lib/tfrc.h
+++ b/net/dccp/ccids/lib/tfrc.h
@@ -13,8 +13,29 @@
13 * the Free Software Foundation; either version 2 of the License, or 13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version. 14 * (at your option) any later version.
15 */ 15 */
16
17#include <linux/types.h> 16#include <linux/types.h>
17#include <asm/div64.h>
18
19/* integer-arithmetic divisions of type (a * 1000000)/b */
20static inline u64 scaled_div(u64 a, u32 b)
21{
22 BUG_ON(b==0);
23 a *= 1000000;
24 do_div(a, b);
25 return a;
26}
27
28static inline u32 scaled_div32(u64 a, u32 b)
29{
30 u64 result = scaled_div(a, b);
31
32 if (result > UINT_MAX) {
33 DCCP_CRIT("Overflow: a(%llu)/b(%u) > ~0U",
34 (unsigned long long)a, b);
35 return UINT_MAX;
36 }
37 return result;
38}
18 39
19extern u32 tfrc_calc_x(u16 s, u32 R, u32 p); 40extern u32 tfrc_calc_x(u16 s, u32 R, u32 p);
20extern u32 tfrc_calc_x_reverse_lookup(u32 fvalue); 41extern u32 tfrc_calc_x_reverse_lookup(u32 fvalue);
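
To make the behaviour of the two helpers added above concrete, here is a userspace re-statement (do_div replaced by plain 64-bit division, DCCP_CRIT omitted) together with an overflow case that scaled_div32 clamps to UINT_MAX:

    #include <stdint.h>
    #include <stdio.h>
    #include <limits.h>

    /* (a * 1000000) / b -- the caller must guarantee b != 0 */
    static uint64_t scaled_div(uint64_t a, uint32_t b)
    {
        return a * 1000000 / b;
    }

    /* Same division, clamped into 32 bits as in the kernel helper. */
    static uint32_t scaled_div32(uint64_t a, uint32_t b)
    {
        uint64_t result = scaled_div(a, b);

        return result > UINT_MAX ? UINT_MAX : (uint32_t)result;
    }

    int main(void)
    {
        printf("%u\n", scaled_div32(1460, 100000));    /* 14600               */
        printf("%u\n", scaled_div32(10000000ULL, 2));  /* clamped: 4294967295 */
        return 0;
    }
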
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c
index ddac2c511e2..90009fd77e1 100644
--- a/net/dccp/ccids/lib/tfrc_equation.c
+++ b/net/dccp/ccids/lib/tfrc_equation.c
@@ -13,7 +13,6 @@
13 */ 13 */
14 14
15#include <linux/module.h> 15#include <linux/module.h>
16#include <asm/div64.h>
17#include "../../dccp.h" 16#include "../../dccp.h"
18#include "tfrc.h" 17#include "tfrc.h"
19 18
@@ -616,15 +615,12 @@ static inline u32 tfrc_binsearch(u32 fval, u8 small)
616 * @R: RTT scaled by 1000000 (i.e., microseconds) 615 * @R: RTT scaled by 1000000 (i.e., microseconds)
617 * @p: loss ratio estimate scaled by 1000000 616 * @p: loss ratio estimate scaled by 1000000
618 * Returns X_calc in bytes per second (not scaled). 617 * Returns X_calc in bytes per second (not scaled).
619 *
620 * Note: DO NOT alter this code unless you run test cases against it,
621 * as the code has been optimized to stop underflow/overflow.
622 */ 618 */
623u32 tfrc_calc_x(u16 s, u32 R, u32 p) 619u32 tfrc_calc_x(u16 s, u32 R, u32 p)
624{ 620{
625 int index; 621 u16 index;
626 u32 f; 622 u32 f;
627 u64 tmp1, tmp2; 623 u64 result;
628 624
629 /* check against invalid parameters and divide-by-zero */ 625 /* check against invalid parameters and divide-by-zero */
630 BUG_ON(p > 1000000); /* p must not exceed 100% */ 626 BUG_ON(p > 1000000); /* p must not exceed 100% */
@@ -650,15 +646,17 @@ u32 tfrc_calc_x(u16 s, u32 R, u32 p)
650 f = tfrc_calc_x_lookup[index][0]; 646 f = tfrc_calc_x_lookup[index][0];
651 } 647 }
652 648
653 /* The following computes X = s/(R*f(p)) in bytes per second. Since f(p) 649 /*
654 * and R are both scaled by 1000000, we need to multiply by 1000000^2. 650 * Compute X = s/(R*f(p)) in bytes per second.
655 * ==> DO NOT alter this unless you test against overflow on 32 bit */ 651 * Since f(p) and R are both scaled by 1000000, we need to multiply by
656 tmp1 = ((u64)s * 100000000); 652 * 1000000^2. To avoid overflow, the result is computed in two stages.
657 tmp2 = ((u64)R * (u64)f); 653 * This works under almost all reasonable operational conditions, for a
658 do_div(tmp2, 10000); 654 * wide range of parameters. Yet, should some strange combination of
659 do_div(tmp1, tmp2); 655 * parameters result in overflow, the use of scaled_div32 will catch
660 656 * this and return UINT_MAX - which is a logically adequate consequence.
661 return (u32)tmp1; 657 */
658 result = scaled_div(s, R);
659 return scaled_div32(result, f);
662} 660}
663 661
664EXPORT_SYMBOL_GPL(tfrc_calc_x); 662EXPORT_SYMBOL_GPL(tfrc_calc_x);
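
A worked instance of the two-stage computation above (approximate figures: s = 1460 bytes, R = 100000 us, p = 1%, for which the TFRC throughput formula gives f(p) ~ 0.089, i.e. a table value near 89000):

    stage 1:  scaled_div(1460, 100000)   = 1460 * 1e6 / 1e5    = 14600
    stage 2:  scaled_div32(14600, 89000) = 14600 * 1e6 / 89000 ~ 164045

so X is roughly 164 kbytes/s, which matches X = s/(R*f(p)) = 1460 / (0.1 s * 0.089) evaluated directly.
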
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 68886986c8e..a0900bf98e6 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -80,8 +80,6 @@ extern void dccp_time_wait(struct sock *sk, int state, int timeo);
80 80
81#define DCCP_RTO_MAX ((unsigned)(120 * HZ)) /* FIXME: using TCP value */ 81#define DCCP_RTO_MAX ((unsigned)(120 * HZ)) /* FIXME: using TCP value */
82 82
83#define DCCP_XMIT_TIMEO 30000 /* Time/msecs for blocking transmit per packet */
84
85/* sysctl variables for DCCP */ 83/* sysctl variables for DCCP */
86extern int sysctl_dccp_request_retries; 84extern int sysctl_dccp_request_retries;
87extern int sysctl_dccp_retries1; 85extern int sysctl_dccp_retries1;
@@ -434,6 +432,7 @@ static inline void timeval_sub_usecs(struct timeval *tv,
434 tv->tv_sec--; 432 tv->tv_sec--;
435 tv->tv_usec += USEC_PER_SEC; 433 tv->tv_usec += USEC_PER_SEC;
436 } 434 }
435 DCCP_BUG_ON(tv->tv_sec < 0);
437} 436}
438 437
439#ifdef CONFIG_SYSCTL 438#ifdef CONFIG_SYSCTL
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 4dc487f27a1..95b6927ec65 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -329,7 +329,7 @@ static void dccp_feat_empty_confirm(struct dccp_minisock *dmsk,
329 switch (type) { 329 switch (type) {
330 case DCCPO_CHANGE_L: opt->dccpop_type = DCCPO_CONFIRM_R; break; 330 case DCCPO_CHANGE_L: opt->dccpop_type = DCCPO_CONFIRM_R; break;
331 case DCCPO_CHANGE_R: opt->dccpop_type = DCCPO_CONFIRM_L; break; 331 case DCCPO_CHANGE_R: opt->dccpop_type = DCCPO_CONFIRM_L; break;
332 default: DCCP_WARN("invalid type %d\n", type); return; 332 default: DCCP_WARN("invalid type %d\n", type); return;
333 333
334 } 334 }
335 opt->dccpop_feat = feature; 335 opt->dccpop_feat = feature;
@@ -427,7 +427,7 @@ int dccp_feat_confirm_recv(struct sock *sk, u8 type, u8 feature,
427 switch (type) { 427 switch (type) {
428 case DCCPO_CONFIRM_L: t = DCCPO_CHANGE_R; break; 428 case DCCPO_CONFIRM_L: t = DCCPO_CHANGE_R; break;
429 case DCCPO_CONFIRM_R: t = DCCPO_CHANGE_L; break; 429 case DCCPO_CONFIRM_R: t = DCCPO_CHANGE_L; break;
430 default: DCCP_WARN("invalid type %d\n", type); 430 default: DCCP_WARN("invalid type %d\n", type);
431 return 1; 431 return 1;
432 432
433 } 433 }
@@ -610,7 +610,7 @@ const char *dccp_feat_typename(const u8 type)
610 case DCCPO_CHANGE_R: return("ChangeR"); 610 case DCCPO_CHANGE_R: return("ChangeR");
611 case DCCPO_CONFIRM_R: return("ConfirmR"); 611 case DCCPO_CONFIRM_R: return("ConfirmR");
612 /* the following case must not appear in feature negotiation */ 612 /* the following case must not appear in feature negotiation */
613 default: dccp_pr_debug("unknown type %d [BUG!]\n", type); 613 default: dccp_pr_debug("unknown type %d [BUG!]\n", type);
614 } 614 }
615 return NULL; 615 return NULL;
616} 616}
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 7371a2f3acf..565bc80557c 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/dccp/input.c 2 * net/dccp/input.c
3 * 3 *
4 * An implementation of the DCCP protocol 4 * An implementation of the DCCP protocol
5 * Arnaldo Carvalho de Melo <acme@conectiva.com.br> 5 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
6 * 6 *
@@ -82,7 +82,7 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
82 * Otherwise, 82 * Otherwise,
83 * Drop packet and return 83 * Drop packet and return
84 */ 84 */
85 if (dh->dccph_type == DCCP_PKT_SYNC || 85 if (dh->dccph_type == DCCP_PKT_SYNC ||
86 dh->dccph_type == DCCP_PKT_SYNCACK) { 86 dh->dccph_type == DCCP_PKT_SYNCACK) {
87 if (between48(DCCP_SKB_CB(skb)->dccpd_ack_seq, 87 if (between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
88 dp->dccps_awl, dp->dccps_awh) && 88 dp->dccps_awl, dp->dccps_awh) &&
@@ -185,8 +185,8 @@ static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
185 dccp_rcv_close(sk, skb); 185 dccp_rcv_close(sk, skb);
186 return 0; 186 return 0;
187 case DCCP_PKT_REQUEST: 187 case DCCP_PKT_REQUEST:
188 /* Step 7 188 /* Step 7
189 * or (S.is_server and P.type == Response) 189 * or (S.is_server and P.type == Response)
190 * or (S.is_client and P.type == Request) 190 * or (S.is_client and P.type == Request)
191 * or (S.state >= OPEN and P.type == Request 191 * or (S.state >= OPEN and P.type == Request
192 * and P.seqno >= S.OSR) 192 * and P.seqno >= S.OSR)
@@ -248,8 +248,18 @@ int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
248 DCCP_ACKVEC_STATE_RECEIVED)) 248 DCCP_ACKVEC_STATE_RECEIVED))
249 goto discard; 249 goto discard;
250 250
251 ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb); 251 /*
252 ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb); 252 * Deliver to the CCID module in charge.
253 * FIXME: Currently DCCP operates one-directional only, i.e. a listening
254 * server is not at the same time a connecting client. There is
255 * not much sense in delivering to both rx/tx sides at the moment
256 * (only one is active at a time); when moving to bidirectional
257 * service, this needs to be revised.
258 */
259 if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER)
260 ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
261 else
262 ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
253 263
254 return __dccp_rcv_established(sk, skb, dh, len); 264 return __dccp_rcv_established(sk, skb, dh, len);
255discard: 265discard:
@@ -264,7 +274,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
264 const struct dccp_hdr *dh, 274 const struct dccp_hdr *dh,
265 const unsigned len) 275 const unsigned len)
266{ 276{
267 /* 277 /*
268 * Step 4: Prepare sequence numbers in REQUEST 278 * Step 4: Prepare sequence numbers in REQUEST
269 * If S.state == REQUEST, 279 * If S.state == REQUEST,
270 * If (P.type == Response or P.type == Reset) 280 * If (P.type == Response or P.type == Reset)
@@ -332,7 +342,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
332 * from the Response * / 342 * from the Response * /
333 * S.state := PARTOPEN 343 * S.state := PARTOPEN
334 * Set PARTOPEN timer 344 * Set PARTOPEN timer
335 * Continue with S.state == PARTOPEN 345 * Continue with S.state == PARTOPEN
336 * / * Step 12 will send the Ack completing the 346 * / * Step 12 will send the Ack completing the
337 * three-way handshake * / 347 * three-way handshake * /
338 */ 348 */
@@ -363,7 +373,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
363 */ 373 */
364 __kfree_skb(skb); 374 __kfree_skb(skb);
365 return 0; 375 return 0;
366 } 376 }
367 dccp_send_ack(sk); 377 dccp_send_ack(sk);
368 return -1; 378 return -1;
369 } 379 }
@@ -371,7 +381,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
371out_invalid_packet: 381out_invalid_packet:
372 /* dccp_v4_do_rcv will send a reset */ 382 /* dccp_v4_do_rcv will send a reset */
373 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR; 383 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
374 return 1; 384 return 1;
375} 385}
376 386
377static int dccp_rcv_respond_partopen_state_process(struct sock *sk, 387static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
@@ -478,14 +488,17 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
478 if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) 488 if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
479 dccp_event_ack_recv(sk, skb); 489 dccp_event_ack_recv(sk, skb);
480 490
481 if (dccp_msk(sk)->dccpms_send_ack_vector && 491 if (dccp_msk(sk)->dccpms_send_ack_vector &&
482 dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk, 492 dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
483 DCCP_SKB_CB(skb)->dccpd_seq, 493 DCCP_SKB_CB(skb)->dccpd_seq,
484 DCCP_ACKVEC_STATE_RECEIVED)) 494 DCCP_ACKVEC_STATE_RECEIVED))
485 goto discard; 495 goto discard;
486 496
487 ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb); 497 /* XXX see the comments in dccp_rcv_established about this */
488 ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb); 498 if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER)
499 ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
500 else
501 ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
489 } 502 }
490 503
491 /* 504 /*
@@ -567,7 +580,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
567 } 580 }
568 } 581 }
569 582
570 if (!queued) { 583 if (!queued) {
571discard: 584discard:
572 __kfree_skb(skb); 585 __kfree_skb(skb);
573 } 586 }
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index ff81679c9f1..90c74b4adb7 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -157,7 +157,7 @@ static inline void dccp_do_pmtu_discovery(struct sock *sk,
157 /* We don't check in the destentry if pmtu discovery is forbidden 157 /* We don't check in the destentry if pmtu discovery is forbidden
158 * on this route. We just assume that no packet-too-big packets 158 * on this route. We just assume that no packet-too-big packets
159 * are sent back when pmtu discovery is not active. 159 * are sent back when pmtu discovery is not active.
160 * There is a small race when the user changes this flag in the 160 * There is a small race when the user changes this flag in the
161 * route, but I think that's acceptable. 161 * route, but I think that's acceptable.
162 */ 162 */
163 if ((dst = __sk_dst_check(sk, 0)) == NULL) 163 if ((dst = __sk_dst_check(sk, 0)) == NULL)
@@ -467,7 +467,7 @@ static struct dst_entry* dccp_v4_route_skb(struct sock *sk,
467 .uli_u = { .ports = 467 .uli_u = { .ports =
468 { .sport = dccp_hdr(skb)->dccph_dport, 468 { .sport = dccp_hdr(skb)->dccph_dport,
469 .dport = dccp_hdr(skb)->dccph_sport } 469 .dport = dccp_hdr(skb)->dccph_sport }
470 } 470 }
471 }; 471 };
472 472
473 security_skb_classify_flow(skb, &fl); 473 security_skb_classify_flow(skb, &fl);
@@ -595,7 +595,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
595 struct inet_request_sock *ireq; 595 struct inet_request_sock *ireq;
596 struct request_sock *req; 596 struct request_sock *req;
597 struct dccp_request_sock *dreq; 597 struct dccp_request_sock *dreq;
598 const __be32 service = dccp_hdr_request(skb)->dccph_req_service; 598 const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
599 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); 599 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
600 __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY; 600 __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;
601 601
@@ -609,7 +609,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
609 if (dccp_bad_service_code(sk, service)) { 609 if (dccp_bad_service_code(sk, service)) {
610 reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE; 610 reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
611 goto drop; 611 goto drop;
612 } 612 }
613 /* 613 /*
614 * TW buckets are converted to open requests without 614 * TW buckets are converted to open requests without
615 * limitations, they conserve resources and peer is 615 * limitations, they conserve resources and peer is
@@ -644,7 +644,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
644 ireq->rmt_addr = skb->nh.iph->saddr; 644 ireq->rmt_addr = skb->nh.iph->saddr;
645 ireq->opt = NULL; 645 ireq->opt = NULL;
646 646
647 /* 647 /*
648 * Step 3: Process LISTEN state 648 * Step 3: Process LISTEN state
649 * 649 *
650 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie 650 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
@@ -846,15 +846,15 @@ static int dccp_v4_rcv(struct sk_buff *skb)
846 } 846 }
847 847
848 /* Step 2: 848 /* Step 2:
849 * Look up flow ID in table and get corresponding socket */ 849 * Look up flow ID in table and get corresponding socket */
850 sk = __inet_lookup(&dccp_hashinfo, 850 sk = __inet_lookup(&dccp_hashinfo,
851 skb->nh.iph->saddr, dh->dccph_sport, 851 skb->nh.iph->saddr, dh->dccph_sport,
852 skb->nh.iph->daddr, dh->dccph_dport, 852 skb->nh.iph->daddr, dh->dccph_dport,
853 inet_iif(skb)); 853 inet_iif(skb));
854 854
855 /* 855 /*
856 * Step 2: 856 * Step 2:
857 * If no socket ... 857 * If no socket ...
858 */ 858 */
859 if (sk == NULL) { 859 if (sk == NULL) {
860 dccp_pr_debug("failed to look up flow ID in table and " 860 dccp_pr_debug("failed to look up flow ID in table and "
@@ -862,9 +862,9 @@ static int dccp_v4_rcv(struct sk_buff *skb)
862 goto no_dccp_socket; 862 goto no_dccp_socket;
863 } 863 }
864 864
865 /* 865 /*
866 * Step 2: 866 * Step 2:
867 * ... or S.state == TIMEWAIT, 867 * ... or S.state == TIMEWAIT,
868 * Generate Reset(No Connection) unless P.type == Reset 868 * Generate Reset(No Connection) unless P.type == Reset
869 * Drop packet and return 869 * Drop packet and return
870 */ 870 */
@@ -876,8 +876,8 @@ static int dccp_v4_rcv(struct sk_buff *skb)
876 876
877 /* 877 /*
878 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage 878 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
879 * o if MinCsCov = 0, only packets with CsCov = 0 are accepted 879 * o if MinCsCov = 0, only packets with CsCov = 0 are accepted
880 * o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov 880 * o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
881 */ 881 */
882 min_cov = dccp_sk(sk)->dccps_pcrlen; 882 min_cov = dccp_sk(sk)->dccps_pcrlen;
883 if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) { 883 if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
@@ -900,7 +900,7 @@ no_dccp_socket:
900 goto discard_it; 900 goto discard_it;
901 /* 901 /*
902 * Step 2: 902 * Step 2:
903 * If no socket ... 903 * If no socket ...
904 * Generate Reset(No Connection) unless P.type == Reset 904 * Generate Reset(No Connection) unless P.type == Reset
905 * Drop packet and return 905 * Drop packet and return
906 */ 906 */
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index c7aaa2574f5..6b91a9dd041 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -77,7 +77,7 @@ static inline void dccp_v6_send_check(struct sock *sk, int unused_value,
77} 77}
78 78
79static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr, 79static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
80 __be16 sport, __be16 dport ) 80 __be16 sport, __be16 dport )
81{ 81{
82 return secure_tcpv6_sequence_number(saddr, daddr, sport, dport); 82 return secure_tcpv6_sequence_number(saddr, daddr, sport, dport);
83} 83}
@@ -329,7 +329,7 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
329 skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header, 329 skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header,
330 GFP_ATOMIC); 330 GFP_ATOMIC);
331 if (skb == NULL) 331 if (skb == NULL)
332 return; 332 return;
333 333
334 skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header); 334 skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header);
335 335
@@ -353,7 +353,7 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
353 353
354 dccp_csum_outgoing(skb); 354 dccp_csum_outgoing(skb);
355 dh->dccph_checksum = dccp_v6_csum_finish(skb, &rxskb->nh.ipv6h->saddr, 355 dh->dccph_checksum = dccp_v6_csum_finish(skb, &rxskb->nh.ipv6h->saddr,
356 &rxskb->nh.ipv6h->daddr); 356 &rxskb->nh.ipv6h->daddr);
357 357
358 memset(&fl, 0, sizeof(fl)); 358 memset(&fl, 0, sizeof(fl));
359 ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr); 359 ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
@@ -424,7 +424,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
424 struct dccp_request_sock *dreq; 424 struct dccp_request_sock *dreq;
425 struct inet6_request_sock *ireq6; 425 struct inet6_request_sock *ireq6;
426 struct ipv6_pinfo *np = inet6_sk(sk); 426 struct ipv6_pinfo *np = inet6_sk(sk);
427 const __be32 service = dccp_hdr_request(skb)->dccph_req_service; 427 const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
428 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); 428 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
429 __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY; 429 __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;
430 430
@@ -437,7 +437,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
437 if (dccp_bad_service_code(sk, service)) { 437 if (dccp_bad_service_code(sk, service)) {
438 reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE; 438 reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
439 goto drop; 439 goto drop;
440 } 440 }
441 /* 441 /*
442 * There are no SYN attacks on IPv6, yet... 442 * There are no SYN attacks on IPv6, yet...
443 */ 443 */
@@ -787,7 +787,7 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
787 * otherwise we just shortcircuit this and continue with 787 * otherwise we just shortcircuit this and continue with
788 * the new socket.. 788 * the new socket..
789 */ 789 */
790 if (nsk != sk) { 790 if (nsk != sk) {
791 if (dccp_child_process(sk, nsk, skb)) 791 if (dccp_child_process(sk, nsk, skb))
792 goto reset; 792 goto reset;
793 if (opt_skb != NULL) 793 if (opt_skb != NULL)
@@ -843,14 +843,14 @@ static int dccp_v6_rcv(struct sk_buff **pskb)
843 DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb); 843 DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
844 844
845 /* Step 2: 845 /* Step 2:
846 * Look up flow ID in table and get corresponding socket */ 846 * Look up flow ID in table and get corresponding socket */
847 sk = __inet6_lookup(&dccp_hashinfo, &skb->nh.ipv6h->saddr, 847 sk = __inet6_lookup(&dccp_hashinfo, &skb->nh.ipv6h->saddr,
848 dh->dccph_sport, 848 dh->dccph_sport,
849 &skb->nh.ipv6h->daddr, ntohs(dh->dccph_dport), 849 &skb->nh.ipv6h->daddr, ntohs(dh->dccph_dport),
850 inet6_iif(skb)); 850 inet6_iif(skb));
851 /* 851 /*
852 * Step 2: 852 * Step 2:
853 * If no socket ... 853 * If no socket ...
854 */ 854 */
855 if (sk == NULL) { 855 if (sk == NULL) {
856 dccp_pr_debug("failed to look up flow ID in table and " 856 dccp_pr_debug("failed to look up flow ID in table and "
@@ -860,7 +860,7 @@ static int dccp_v6_rcv(struct sk_buff **pskb)
860 860
861 /* 861 /*
862 * Step 2: 862 * Step 2:
863 * ... or S.state == TIMEWAIT, 863 * ... or S.state == TIMEWAIT,
864 * Generate Reset(No Connection) unless P.type == Reset 864 * Generate Reset(No Connection) unless P.type == Reset
865 * Drop packet and return 865 * Drop packet and return
866 */ 866 */
@@ -872,8 +872,8 @@ static int dccp_v6_rcv(struct sk_buff **pskb)
872 872
873 /* 873 /*
874 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage 874 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
875 * o if MinCsCov = 0, only packets with CsCov = 0 are accepted 875 * o if MinCsCov = 0, only packets with CsCov = 0 are accepted
876 * o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov 876 * o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
877 */ 877 */
878 min_cov = dccp_sk(sk)->dccps_pcrlen; 878 min_cov = dccp_sk(sk)->dccps_pcrlen;
879 if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) { 879 if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
@@ -893,7 +893,7 @@ no_dccp_socket:
893 goto discard_it; 893 goto discard_it;
894 /* 894 /*
895 * Step 2: 895 * Step 2:
896 * If no socket ... 896 * If no socket ...
897 * Generate Reset(No Connection) unless P.type == Reset 897 * Generate Reset(No Connection) unless P.type == Reset
898 * Drop packet and return 898 * Drop packet and return
899 */ 899 */
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 4c9e26775f7..6656bb497c7 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -182,7 +182,7 @@ out_free:
182 182
183EXPORT_SYMBOL_GPL(dccp_create_openreq_child); 183EXPORT_SYMBOL_GPL(dccp_create_openreq_child);
184 184
185/* 185/*
186 * Process an incoming packet for RESPOND sockets represented 186 * Process an incoming packet for RESPOND sockets represented
187 * as a request_sock. 187 * as a request_sock.
188 */ 188 */
diff --git a/net/dccp/options.c b/net/dccp/options.c
index f398b43bc05..c03ba61eb6d 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -557,11 +557,6 @@ int dccp_insert_options(struct sock *sk, struct sk_buff *skb)
557 return -1; 557 return -1;
558 dp->dccps_hc_rx_insert_options = 0; 558 dp->dccps_hc_rx_insert_options = 0;
559 } 559 }
560 if (dp->dccps_hc_tx_insert_options) {
561 if (ccid_hc_tx_insert_options(dp->dccps_hc_tx_ccid, sk, skb))
562 return -1;
563 dp->dccps_hc_tx_insert_options = 0;
564 }
565 560
566 /* Feature negotiation */ 561 /* Feature negotiation */
567 /* Data packets can't do feat negotiation */ 562 /* Data packets can't do feat negotiation */
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 400c30b6fca..82456965908 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/dccp/output.c 2 * net/dccp/output.c
3 * 3 *
4 * An implementation of the DCCP protocol 4 * An implementation of the DCCP protocol
5 * Arnaldo Carvalho de Melo <acme@conectiva.com.br> 5 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
6 * 6 *
@@ -175,14 +175,12 @@ void dccp_write_space(struct sock *sk)
175/** 175/**
176 * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet 176 * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet
177 * @sk: socket to wait for 177 * @sk: socket to wait for
178 * @timeo: for how long
179 */ 178 */
180static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, 179static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb)
181 long *timeo)
182{ 180{
183 struct dccp_sock *dp = dccp_sk(sk); 181 struct dccp_sock *dp = dccp_sk(sk);
184 DEFINE_WAIT(wait); 182 DEFINE_WAIT(wait);
185 long delay; 183 unsigned long delay;
186 int rc; 184 int rc;
187 185
188 while (1) { 186 while (1) {
@@ -190,8 +188,6 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,
190 188
191 if (sk->sk_err) 189 if (sk->sk_err)
192 goto do_error; 190 goto do_error;
193 if (!*timeo)
194 goto do_nonblock;
195 if (signal_pending(current)) 191 if (signal_pending(current))
196 goto do_interrupted; 192 goto do_interrupted;
197 193
@@ -199,12 +195,9 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,
199 if (rc <= 0) 195 if (rc <= 0)
200 break; 196 break;
201 delay = msecs_to_jiffies(rc); 197 delay = msecs_to_jiffies(rc);
202 if (delay > *timeo || delay < 0)
203 goto do_nonblock;
204
205 sk->sk_write_pending++; 198 sk->sk_write_pending++;
206 release_sock(sk); 199 release_sock(sk);
207 *timeo -= schedule_timeout(delay); 200 schedule_timeout(delay);
208 lock_sock(sk); 201 lock_sock(sk);
209 sk->sk_write_pending--; 202 sk->sk_write_pending--;
210 } 203 }
@@ -215,11 +208,8 @@ out:
215do_error: 208do_error:
216 rc = -EPIPE; 209 rc = -EPIPE;
217 goto out; 210 goto out;
218do_nonblock:
219 rc = -EAGAIN;
220 goto out;
221do_interrupted: 211do_interrupted:
222 rc = sock_intr_errno(*timeo); 212 rc = -EINTR;
223 goto out; 213 goto out;
224} 214}
225 215
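
The reworked dccp_wait_for_ccid() above now blocks until the CCID clears the packet (or an error/signal intervenes), sleeping for whatever delay the CCID reports each round. A rough userspace analogue of that loop, with a hypothetical ccid_delay_ms() standing in for ccid_hc_tx_send_packet() and signal/error handling left out:

    #include <stdio.h>
    #include <time.h>

    /* Hypothetical stand-in: > 0 means "wait that many ms", 0 means "send now". */
    static int ccid_delay_ms(void)
    {
        static int calls;

        return calls++ < 3 ? 10 : 0;    /* pretend the rate limit clears after ~30 ms */
    }

    int main(void)
    {
        int delay;

        /* Mirror of the loop: keep asking, sleep the reported delay, retry. */
        while ((delay = ccid_delay_ms()) > 0) {
            struct timespec ts = { delay / 1000, (delay % 1000) * 1000000L };

            printf("CCID asks us to wait %d ms\n", delay);
            nanosleep(&ts, NULL);
        }
        printf("CCID permits sending\n");
        return 0;
    }
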
@@ -240,8 +230,6 @@ void dccp_write_xmit(struct sock *sk, int block)
240{ 230{
241 struct dccp_sock *dp = dccp_sk(sk); 231 struct dccp_sock *dp = dccp_sk(sk);
242 struct sk_buff *skb; 232 struct sk_buff *skb;
243 long timeo = DCCP_XMIT_TIMEO; /* If a packet is taking longer than
244 this we have other issues */
245 233
246 while ((skb = skb_peek(&sk->sk_write_queue))) { 234 while ((skb = skb_peek(&sk->sk_write_queue))) {
247 int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); 235 int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
@@ -251,11 +239,9 @@ void dccp_write_xmit(struct sock *sk, int block)
251 sk_reset_timer(sk, &dp->dccps_xmit_timer, 239 sk_reset_timer(sk, &dp->dccps_xmit_timer,
252 msecs_to_jiffies(err)+jiffies); 240 msecs_to_jiffies(err)+jiffies);
253 break; 241 break;
254 } else { 242 } else
255 err = dccp_wait_for_ccid(sk, skb, &timeo); 243 err = dccp_wait_for_ccid(sk, skb);
256 timeo = DCCP_XMIT_TIMEO; 244 if (err && err != -EINTR)
257 }
258 if (err)
259 DCCP_BUG("err=%d after dccp_wait_for_ccid", err); 245 DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
260 } 246 }
261 247
@@ -281,8 +267,10 @@ void dccp_write_xmit(struct sock *sk, int block)
281 if (err) 267 if (err)
282 DCCP_BUG("err=%d after ccid_hc_tx_packet_sent", 268 DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
283 err); 269 err);
284 } else 270 } else {
271 dccp_pr_debug("packet discarded\n");
285 kfree(skb); 272 kfree(skb);
273 }
286 } 274 }
287} 275}
288 276
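
A companion sketch of the per-packet policy in the rewritten dccp_write_xmit(): a positive CCID delay arms the xmit timer on the non-blocking path or blocks in the wait helper otherwise, and a packet that still cannot be sent is now discarded with a debug message rather than silently freed. The outcome codes and printouts below are illustrative, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative outcome codes; the real function uses CCID return values. */
enum { SEND_OK = 0, SEND_DELAY_MS = 50, SEND_FAIL = -1 };

static void write_xmit_one(int ccid_verdict, bool block)
{
	if (ccid_verdict > 0) {
		if (!block) {
			printf("arm xmit timer for %d ms and return\n",
			       ccid_verdict);
			return;
		}
		printf("block in wait_for_ccid() until the CCID allows it\n");
		ccid_verdict = SEND_OK;	/* assume the wait eventually succeeds */
	}

	if (ccid_verdict == SEND_OK)
		printf("transmit packet, then tell the CCID it was sent\n");
	else
		printf("packet discarded\n");	/* new debug path */
}

int main(void)
{
	write_xmit_one(SEND_DELAY_MS, false);
	write_xmit_one(SEND_DELAY_MS, true);
	write_xmit_one(SEND_FAIL, true);
	return 0;
}
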
@@ -350,7 +338,6 @@ EXPORT_SYMBOL_GPL(dccp_make_response);
350 338
351static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst, 339static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
352 const enum dccp_reset_codes code) 340 const enum dccp_reset_codes code)
353
354{ 341{
355 struct dccp_hdr *dh; 342 struct dccp_hdr *dh;
356 struct dccp_sock *dp = dccp_sk(sk); 343 struct dccp_sock *dp = dccp_sk(sk);
@@ -431,14 +418,14 @@ static inline void dccp_connect_init(struct sock *sk)
431 418
432 dccp_sync_mss(sk, dst_mtu(dst)); 419 dccp_sync_mss(sk, dst_mtu(dst));
433 420
434 /* 421 /*
435 * SWL and AWL are initially adjusted so that they are not less than 422 * SWL and AWL are initially adjusted so that they are not less than
436 * the initial Sequence Numbers received and sent, respectively: 423 * the initial Sequence Numbers received and sent, respectively:
437 * SWL := max(GSR + 1 - floor(W/4), ISR), 424 * SWL := max(GSR + 1 - floor(W/4), ISR),
438 * AWL := max(GSS - W' + 1, ISS). 425 * AWL := max(GSS - W' + 1, ISS).
439 * These adjustments MUST be applied only at the beginning of the 426 * These adjustments MUST be applied only at the beginning of the
440 * connection. 427 * connection.
441 */ 428 */
442 dccp_update_gss(sk, dp->dccps_iss); 429 dccp_update_gss(sk, dp->dccps_iss);
443 dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss)); 430 dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss));
444 431
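
A worked example of the SWL/AWL initialisation described in the comment above, as a standalone sketch. It ignores 48-bit wrap-around (which the real max48()/dccp_set_seqno() helpers handle) and uses made-up values for W, W', ISR and ISS.

#include <inttypes.h>
#include <stdio.h>

/* Sequence-window maths from the comment, without wrap-around handling:
 *   SWL := max(GSR + 1 - floor(W/4), ISR)
 *   AWL := max(GSS - W' + 1, ISS)
 */
static uint64_t max48(uint64_t a, uint64_t b) { return a > b ? a : b; }

int main(void)
{
	const uint64_t W = 100, Wprime = 100;	/* illustrative window sizes */
	const uint64_t ISR = 1000, ISS = 5000;	/* initial seqnos recv'd/sent */
	uint64_t gsr = ISR, gss = ISS;		/* at connect time GSR=ISR, GSS=ISS */

	uint64_t swl = max48(gsr + 1 - W / 4, ISR);
	uint64_t awl = max48(gss - Wprime + 1, ISS);

	/* Both clamp to the initial sequence numbers, as the comment requires. */
	printf("SWL = %" PRIu64 ", AWL = %" PRIu64 "\n", swl, awl);
	return 0;
}
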
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 5ec47d9ee44..63b3fa20e14 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -196,7 +196,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
196 sk, GFP_KERNEL); 196 sk, GFP_KERNEL);
197 dp->dccps_hc_tx_ccid = ccid_hc_tx_new(dmsk->dccpms_tx_ccid, 197 dp->dccps_hc_tx_ccid = ccid_hc_tx_new(dmsk->dccpms_tx_ccid,
198 sk, GFP_KERNEL); 198 sk, GFP_KERNEL);
199 if (unlikely(dp->dccps_hc_rx_ccid == NULL || 199 if (unlikely(dp->dccps_hc_rx_ccid == NULL ||
200 dp->dccps_hc_tx_ccid == NULL)) { 200 dp->dccps_hc_tx_ccid == NULL)) {
201 ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk); 201 ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
202 ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk); 202 ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
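
The allocation hunk above follows a common pattern: create both CCID halves, then undo both if either failed, relying on the delete helpers tolerating NULL. A generic userspace sketch of that pattern, with alloc_ccid()/free_ccid() as placeholders for the real constructors:

#include <stdio.h>
#include <stdlib.h>

struct ccid { int id; };

/* Placeholders standing in for ccid_hc_rx_new()/ccid_hc_tx_new(). */
static struct ccid *alloc_ccid(int id)
{
	struct ccid *c = malloc(sizeof(*c));
	if (c)
		c->id = id;
	return c;
}

static void free_ccid(struct ccid *c)
{
	free(c);	/* free(NULL) is a no-op, as the deleters are assumed to be */
}

static int init_sock_ccids(struct ccid **rx, struct ccid **tx)
{
	*rx = alloc_ccid(2);
	*tx = alloc_ccid(3);
	if (*rx == NULL || *tx == NULL) {
		/* Mirror the hunk: delete both halves on partial failure. */
		free_ccid(*rx);
		free_ccid(*tx);
		*rx = *tx = NULL;
		return -1;	/* the kernel returns -ENOMEM here */
	}
	return 0;
}

int main(void)
{
	struct ccid *rx, *tx;
	printf("init_sock_ccids() = %d\n", init_sock_ccids(&rx, &tx));
	free_ccid(rx);
	free_ccid(tx);
	return 0;
}
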
@@ -390,7 +390,7 @@ static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
390 struct dccp_sock *dp = dccp_sk(sk); 390 struct dccp_sock *dp = dccp_sk(sk);
391 struct dccp_service_list *sl = NULL; 391 struct dccp_service_list *sl = NULL;
392 392
393 if (service == DCCP_SERVICE_INVALID_VALUE || 393 if (service == DCCP_SERVICE_INVALID_VALUE ||
394 optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32)) 394 optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
395 return -EINVAL; 395 return -EINVAL;
396 396
@@ -830,7 +830,7 @@ EXPORT_SYMBOL_GPL(inet_dccp_listen);
830static const unsigned char dccp_new_state[] = { 830static const unsigned char dccp_new_state[] = {
831 /* current state: new state: action: */ 831 /* current state: new state: action: */
832 [0] = DCCP_CLOSED, 832 [0] = DCCP_CLOSED,
833 [DCCP_OPEN] = DCCP_CLOSING | DCCP_ACTION_FIN, 833 [DCCP_OPEN] = DCCP_CLOSING | DCCP_ACTION_FIN,
834 [DCCP_REQUESTING] = DCCP_CLOSED, 834 [DCCP_REQUESTING] = DCCP_CLOSED,
835 [DCCP_PARTOPEN] = DCCP_CLOSING | DCCP_ACTION_FIN, 835 [DCCP_PARTOPEN] = DCCP_CLOSING | DCCP_ACTION_FIN,
836 [DCCP_LISTEN] = DCCP_CLOSED, 836 [DCCP_LISTEN] = DCCP_CLOSED,
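
A minimal sketch of how a transition table like dccp_new_state[] can be consulted: each entry packs the next state with an action flag, which the caller masks off before storing the state. The flag value, state values and helper below are illustrative, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative encoding: low bits = next state, one high bit = "send a
 * Close" action. The kernel's DCCP_ACTION_FIN and state values may differ. */
enum { ST_CLOSED, ST_OPEN, ST_CLOSING, ST_MAX };
#define ACTION_FIN	0x80
#define STATE_MASK	0x7f

static const unsigned char new_state[ST_MAX] = {
	[ST_CLOSED]  = ST_CLOSED,
	[ST_OPEN]    = ST_CLOSING | ACTION_FIN,
	[ST_CLOSING] = ST_CLOSING,
};

static bool close_state(int *state)
{
	int next = new_state[*state];

	*state = next & STATE_MASK;	/* store the bare state */
	return next & ACTION_FIN;	/* tell the caller to send Close */
}

int main(void)
{
	int st = ST_OPEN;
	bool send_close = close_state(&st);

	printf("next state %d, send Close: %s\n", st, send_close ? "yes" : "no");
	return 0;
}
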
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index e8f519e7f48..e5348f369c6 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/dccp/timer.c 2 * net/dccp/timer.c
3 * 3 *
4 * An implementation of the DCCP protocol 4 * An implementation of the DCCP protocol
5 * Arnaldo Carvalho de Melo <acme@conectiva.com.br> 5 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
6 * 6 *
@@ -102,13 +102,13 @@ static void dccp_retransmit_timer(struct sock *sk)
102 * sk->sk_send_head has to have one skb with 102 * sk->sk_send_head has to have one skb with
103 * DCCP_SKB_CB(skb)->dccpd_type set to one of the retransmittable DCCP 103 * DCCP_SKB_CB(skb)->dccpd_type set to one of the retransmittable DCCP
104 * packet types. The only packets eligible for retransmission are: 104 * packet types. The only packets eligible for retransmission are:
105 * -- Requests in client-REQUEST state (sec. 8.1.1) 105 * -- Requests in client-REQUEST state (sec. 8.1.1)
106 * -- Acks in client-PARTOPEN state (sec. 8.1.5) 106 * -- Acks in client-PARTOPEN state (sec. 8.1.5)
107 * -- CloseReq in server-CLOSEREQ state (sec. 8.3) 107 * -- CloseReq in server-CLOSEREQ state (sec. 8.3)
108 * -- Close in node-CLOSING state (sec. 8.3) */ 108 * -- Close in node-CLOSING state (sec. 8.3) */
109 BUG_TRAP(sk->sk_send_head != NULL); 109 BUG_TRAP(sk->sk_send_head != NULL);
110 110
111 /* 111 /*
 112 * More than 4MSL (8 minutes) has passed, a RESET(aborted) was 112 * More than 4MSL (8 minutes) has passed, a RESET(aborted) was
113 * sent, no need to retransmit, this sock is dead. 113 * sent, no need to retransmit, this sock is dead.
114 */ 114 */
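
The comment above lists the only packet types eligible for retransmission; a hypothetical helper making that check explicit could look like this (the enum values are illustrative, not DCCP's on-the-wire type codes):

#include <stdbool.h>
#include <stdio.h>

/* Packet types named in the comment; values are illustrative only. */
enum pkt_type { PKT_REQUEST, PKT_ACK, PKT_CLOSEREQ, PKT_CLOSE, PKT_DATA };

static bool retransmittable(enum pkt_type t)
{
	switch (t) {
	case PKT_REQUEST:	/* client-REQUEST state, sec. 8.1.1 */
	case PKT_ACK:		/* client-PARTOPEN state, sec. 8.1.5 */
	case PKT_CLOSEREQ:	/* server-CLOSEREQ state, sec. 8.3 */
	case PKT_CLOSE:		/* node-CLOSING state, sec. 8.3 */
		return true;
	default:
		return false;
	}
}

int main(void)
{
	printf("DATA retransmittable?    %d\n", retransmittable(PKT_DATA));
	printf("REQUEST retransmittable? %d\n", retransmittable(PKT_REQUEST));
	return 0;
}
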
@@ -200,7 +200,7 @@ static void dccp_keepalive_timer(unsigned long data)
200 /* Only process if socket is not in use. */ 200 /* Only process if socket is not in use. */
201 bh_lock_sock(sk); 201 bh_lock_sock(sk);
202 if (sock_owned_by_user(sk)) { 202 if (sock_owned_by_user(sk)) {
203 /* Try again later. */ 203 /* Try again later. */
204 inet_csk_reset_keepalive_timer(sk, HZ / 20); 204 inet_csk_reset_keepalive_timer(sk, HZ / 20);
205 goto out; 205 goto out;
206 } 206 }
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index 91a075edd68..7ea2d981a93 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -657,7 +657,7 @@ static void sync_master_loop(void)
657 if (stop_master_sync) 657 if (stop_master_sync)
658 break; 658 break;
659 659
660 ssleep(1); 660 msleep_interruptible(1000);
661 } 661 }
662 662
663 /* clean up the sync_buff queue */ 663 /* clean up the sync_buff queue */
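
The ssleep(1) -> msleep_interruptible(1000) conversions in this file matter because the interruptible variant returns early when a signal is pending, letting the sync threads react promptly to a stop request. A rough userspace analogue using nanosleep()'s EINTR behaviour:

#include <errno.h>
#include <stdio.h>
#include <time.h>

/* Userspace analogue of msleep_interruptible(ms): sleep, but give up early
 * if a handled signal arrives, returning the approximate time left in ms. */
static unsigned long sleep_interruptible_ms(unsigned long ms)
{
	struct timespec req = { ms / 1000, (ms % 1000) * 1000000L };
	struct timespec rem = { 0, 0 };

	if (nanosleep(&req, &rem) && errno == EINTR)
		return rem.tv_sec * 1000UL + rem.tv_nsec / 1000000UL;
	return 0;	/* slept the full interval */
}

int main(void)
{
	/* With no signal delivered this behaves like a plain 1 s sleep;
	 * a handled SIGINT/SIGTERM during the sleep makes it return early. */
	printf("remaining: %lu ms\n", sleep_interruptible_ms(1000));
	return 0;
}
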
@@ -714,7 +714,7 @@ static void sync_backup_loop(void)
714 if (stop_backup_sync) 714 if (stop_backup_sync)
715 break; 715 break;
716 716
717 ssleep(1); 717 msleep_interruptible(1000);
718 } 718 }
719 719
720 /* release the sending multicast socket */ 720 /* release the sending multicast socket */
@@ -826,7 +826,7 @@ static int fork_sync_thread(void *startup)
826 if ((pid = kernel_thread(sync_thread, startup, 0)) < 0) { 826 if ((pid = kernel_thread(sync_thread, startup, 0)) < 0) {
827 IP_VS_ERR("could not create sync_thread due to %d... " 827 IP_VS_ERR("could not create sync_thread due to %d... "
828 "retrying.\n", pid); 828 "retrying.\n", pid);
829 ssleep(1); 829 msleep_interruptible(1000);
830 goto repeat; 830 goto repeat;
831 } 831 }
832 832
@@ -849,10 +849,12 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
849 849
850 ip_vs_sync_state |= state; 850 ip_vs_sync_state |= state;
851 if (state == IP_VS_STATE_MASTER) { 851 if (state == IP_VS_STATE_MASTER) {
852 strlcpy(ip_vs_master_mcast_ifn, mcast_ifn, sizeof(ip_vs_master_mcast_ifn)); 852 strlcpy(ip_vs_master_mcast_ifn, mcast_ifn,
853 sizeof(ip_vs_master_mcast_ifn));
853 ip_vs_master_syncid = syncid; 854 ip_vs_master_syncid = syncid;
854 } else { 855 } else {
855 strlcpy(ip_vs_backup_mcast_ifn, mcast_ifn, sizeof(ip_vs_backup_mcast_ifn)); 856 strlcpy(ip_vs_backup_mcast_ifn, mcast_ifn,
857 sizeof(ip_vs_backup_mcast_ifn));
856 ip_vs_backup_syncid = syncid; 858 ip_vs_backup_syncid = syncid;
857 } 859 }
858 860
@@ -860,7 +862,7 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
860 if ((pid = kernel_thread(fork_sync_thread, &startup, 0)) < 0) { 862 if ((pid = kernel_thread(fork_sync_thread, &startup, 0)) < 0) {
861 IP_VS_ERR("could not create fork_sync_thread due to %d... " 863 IP_VS_ERR("could not create fork_sync_thread due to %d... "
862 "retrying.\n", pid); 864 "retrying.\n", pid);
863 ssleep(1); 865 msleep_interruptible(1000);
864 goto repeat; 866 goto repeat;
865 } 867 }
866 868
@@ -880,7 +882,8 @@ int stop_sync_thread(int state)
880 882
881 IP_VS_DBG(7, "%s: pid %d\n", __FUNCTION__, current->pid); 883 IP_VS_DBG(7, "%s: pid %d\n", __FUNCTION__, current->pid);
882 IP_VS_INFO("stopping sync thread %d ...\n", 884 IP_VS_INFO("stopping sync thread %d ...\n",
883 (state == IP_VS_STATE_MASTER) ? sync_master_pid : sync_backup_pid); 885 (state == IP_VS_STATE_MASTER) ?
886 sync_master_pid : sync_backup_pid);
884 887
885 __set_current_state(TASK_UNINTERRUPTIBLE); 888 __set_current_state(TASK_UNINTERRUPTIBLE);
886 add_wait_queue(&stop_sync_wait, &wait); 889 add_wait_queue(&stop_sync_wait, &wait);