aboutsummaryrefslogtreecommitdiffstats
path: root/net/dccp
diff options
context:
space:
mode:
authorAndrea Bastoni <bastoni@cs.unc.edu>2010-05-30 19:16:45 -0400
committerAndrea Bastoni <bastoni@cs.unc.edu>2010-05-30 19:16:45 -0400
commitada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree644b88f8a71896307d71438e9b3af49126ffb22b /net/dccp
parent43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-masterarchived-private-master
Diffstat (limited to 'net/dccp')
-rw-r--r--net/dccp/ccid.c31
-rw-r--r--net/dccp/ccid.h6
-rw-r--r--net/dccp/ccids/ccid2.c328
-rw-r--r--net/dccp/ccids/ccid2.h46
-rw-r--r--net/dccp/ccids/ccid3.c400
-rw-r--r--net/dccp/ccids/ccid3.h120
-rw-r--r--net/dccp/feat.c1
-rw-r--r--net/dccp/input.c1
-rw-r--r--net/dccp/ipv4.c61
-rw-r--r--net/dccp/ipv6.c54
-rw-r--r--net/dccp/minisocks.c5
-rw-r--r--net/dccp/output.c5
-rw-r--r--net/dccp/probe.c55
-rw-r--r--net/dccp/proto.c41
-rw-r--r--net/dccp/sysctl.c8
-rw-r--r--net/dccp/timer.c4
16 files changed, 575 insertions, 591 deletions
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index f3e9ba1cfd01..36479ca61e03 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -11,6 +11,8 @@
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 */ 12 */
13 13
14#include <linux/slab.h>
15
14#include "ccid.h" 16#include "ccid.h"
15#include "ccids/lib/tfrc.h" 17#include "ccids/lib/tfrc.h"
16 18
@@ -63,48 +65,37 @@ int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
63 u8 *ccid_array, array_len; 65 u8 *ccid_array, array_len;
64 int err = 0; 66 int err = 0;
65 67
66 if (len < ARRAY_SIZE(ccids))
67 return -EINVAL;
68
69 if (ccid_get_builtin_ccids(&ccid_array, &array_len)) 68 if (ccid_get_builtin_ccids(&ccid_array, &array_len))
70 return -ENOBUFS; 69 return -ENOBUFS;
71 70
72 if (put_user(array_len, optlen) || 71 if (put_user(array_len, optlen))
73 copy_to_user(optval, ccid_array, array_len)) 72 err = -EFAULT;
73 else if (len > 0 && copy_to_user(optval, ccid_array,
74 len > array_len ? array_len : len))
74 err = -EFAULT; 75 err = -EFAULT;
75 76
76 kfree(ccid_array); 77 kfree(ccid_array);
77 return err; 78 return err;
78} 79}
79 80
80static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...) 81static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_fmt, const char *fmt,...)
81{ 82{
82 struct kmem_cache *slab; 83 struct kmem_cache *slab;
83 char slab_name_fmt[32], *slab_name;
84 va_list args; 84 va_list args;
85 85
86 va_start(args, fmt); 86 va_start(args, fmt);
87 vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args); 87 vsnprintf(slab_name_fmt, CCID_SLAB_NAME_LENGTH, fmt, args);
88 va_end(args); 88 va_end(args);
89 89
90 slab_name = kstrdup(slab_name_fmt, GFP_KERNEL); 90 slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0,
91 if (slab_name == NULL)
92 return NULL;
93 slab = kmem_cache_create(slab_name, sizeof(struct ccid) + obj_size, 0,
94 SLAB_HWCACHE_ALIGN, NULL); 91 SLAB_HWCACHE_ALIGN, NULL);
95 if (slab == NULL)
96 kfree(slab_name);
97 return slab; 92 return slab;
98} 93}
99 94
100static void ccid_kmem_cache_destroy(struct kmem_cache *slab) 95static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
101{ 96{
102 if (slab != NULL) { 97 if (slab != NULL)
103 const char *name = kmem_cache_name(slab);
104
105 kmem_cache_destroy(slab); 98 kmem_cache_destroy(slab);
106 kfree(name);
107 }
108} 99}
109 100
110static int ccid_activate(struct ccid_operations *ccid_ops) 101static int ccid_activate(struct ccid_operations *ccid_ops)
@@ -113,6 +104,7 @@ static int ccid_activate(struct ccid_operations *ccid_ops)
113 104
114 ccid_ops->ccid_hc_rx_slab = 105 ccid_ops->ccid_hc_rx_slab =
115 ccid_kmem_cache_create(ccid_ops->ccid_hc_rx_obj_size, 106 ccid_kmem_cache_create(ccid_ops->ccid_hc_rx_obj_size,
107 ccid_ops->ccid_hc_rx_slab_name,
116 "ccid%u_hc_rx_sock", 108 "ccid%u_hc_rx_sock",
117 ccid_ops->ccid_id); 109 ccid_ops->ccid_id);
118 if (ccid_ops->ccid_hc_rx_slab == NULL) 110 if (ccid_ops->ccid_hc_rx_slab == NULL)
@@ -120,6 +112,7 @@ static int ccid_activate(struct ccid_operations *ccid_ops)
120 112
121 ccid_ops->ccid_hc_tx_slab = 113 ccid_ops->ccid_hc_tx_slab =
122 ccid_kmem_cache_create(ccid_ops->ccid_hc_tx_obj_size, 114 ccid_kmem_cache_create(ccid_ops->ccid_hc_tx_obj_size,
115 ccid_ops->ccid_hc_tx_slab_name,
123 "ccid%u_hc_tx_sock", 116 "ccid%u_hc_tx_sock",
124 ccid_ops->ccid_id); 117 ccid_ops->ccid_id);
125 if (ccid_ops->ccid_hc_tx_slab == NULL) 118 if (ccid_ops->ccid_hc_tx_slab == NULL)
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index facedd20b531..6df6f8ac9636 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -19,7 +19,9 @@
19#include <linux/list.h> 19#include <linux/list.h>
20#include <linux/module.h> 20#include <linux/module.h>
21 21
22#define CCID_MAX 255 22/* maximum value for a CCID (RFC 4340, 19.5) */
23#define CCID_MAX 255
24#define CCID_SLAB_NAME_LENGTH 32
23 25
24struct tcp_info; 26struct tcp_info;
25 27
@@ -49,6 +51,8 @@ struct ccid_operations {
49 const char *ccid_name; 51 const char *ccid_name;
50 struct kmem_cache *ccid_hc_rx_slab, 52 struct kmem_cache *ccid_hc_rx_slab,
51 *ccid_hc_tx_slab; 53 *ccid_hc_tx_slab;
54 char ccid_hc_rx_slab_name[CCID_SLAB_NAME_LENGTH];
55 char ccid_hc_tx_slab_name[CCID_SLAB_NAME_LENGTH];
52 __u32 ccid_hc_rx_obj_size, 56 __u32 ccid_hc_rx_obj_size,
53 ccid_hc_tx_obj_size; 57 ccid_hc_tx_obj_size;
54 /* Interface Routines */ 58 /* Interface Routines */
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index e8cf99e880b0..9b3ae9922be1 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -23,6 +23,7 @@
23/* 23/*
24 * This implementation should follow RFC 4341 24 * This implementation should follow RFC 4341
25 */ 25 */
26#include <linux/slab.h>
26#include "../feat.h" 27#include "../feat.h"
27#include "../ccid.h" 28#include "../ccid.h"
28#include "../dccp.h" 29#include "../dccp.h"
@@ -33,20 +34,20 @@
33static int ccid2_debug; 34static int ccid2_debug;
34#define ccid2_pr_debug(format, a...) DCCP_PR_DEBUG(ccid2_debug, format, ##a) 35#define ccid2_pr_debug(format, a...) DCCP_PR_DEBUG(ccid2_debug, format, ##a)
35 36
36static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hctx) 37static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hc)
37{ 38{
38 int len = 0; 39 int len = 0;
39 int pipe = 0; 40 int pipe = 0;
40 struct ccid2_seq *seqp = hctx->ccid2hctx_seqh; 41 struct ccid2_seq *seqp = hc->tx_seqh;
41 42
42 /* there is data in the chain */ 43 /* there is data in the chain */
43 if (seqp != hctx->ccid2hctx_seqt) { 44 if (seqp != hc->tx_seqt) {
44 seqp = seqp->ccid2s_prev; 45 seqp = seqp->ccid2s_prev;
45 len++; 46 len++;
46 if (!seqp->ccid2s_acked) 47 if (!seqp->ccid2s_acked)
47 pipe++; 48 pipe++;
48 49
49 while (seqp != hctx->ccid2hctx_seqt) { 50 while (seqp != hc->tx_seqt) {
50 struct ccid2_seq *prev = seqp->ccid2s_prev; 51 struct ccid2_seq *prev = seqp->ccid2s_prev;
51 52
52 len++; 53 len++;
@@ -63,30 +64,30 @@ static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hctx)
63 } 64 }
64 } 65 }
65 66
66 BUG_ON(pipe != hctx->ccid2hctx_pipe); 67 BUG_ON(pipe != hc->tx_pipe);
67 ccid2_pr_debug("len of chain=%d\n", len); 68 ccid2_pr_debug("len of chain=%d\n", len);
68 69
69 do { 70 do {
70 seqp = seqp->ccid2s_prev; 71 seqp = seqp->ccid2s_prev;
71 len++; 72 len++;
72 } while (seqp != hctx->ccid2hctx_seqh); 73 } while (seqp != hc->tx_seqh);
73 74
74 ccid2_pr_debug("total len=%d\n", len); 75 ccid2_pr_debug("total len=%d\n", len);
75 BUG_ON(len != hctx->ccid2hctx_seqbufc * CCID2_SEQBUF_LEN); 76 BUG_ON(len != hc->tx_seqbufc * CCID2_SEQBUF_LEN);
76} 77}
77#else 78#else
78#define ccid2_pr_debug(format, a...) 79#define ccid2_pr_debug(format, a...)
79#define ccid2_hc_tx_check_sanity(hctx) 80#define ccid2_hc_tx_check_sanity(hc)
80#endif 81#endif
81 82
82static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx) 83static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc)
83{ 84{
84 struct ccid2_seq *seqp; 85 struct ccid2_seq *seqp;
85 int i; 86 int i;
86 87
87 /* check if we have space to preserve the pointer to the buffer */ 88 /* check if we have space to preserve the pointer to the buffer */
88 if (hctx->ccid2hctx_seqbufc >= (sizeof(hctx->ccid2hctx_seqbuf) / 89 if (hc->tx_seqbufc >= (sizeof(hc->tx_seqbuf) /
89 sizeof(struct ccid2_seq*))) 90 sizeof(struct ccid2_seq *)))
90 return -ENOMEM; 91 return -ENOMEM;
91 92
92 /* allocate buffer and initialize linked list */ 93 /* allocate buffer and initialize linked list */
@@ -102,29 +103,29 @@ static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx)
102 seqp->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1]; 103 seqp->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];
103 104
104 /* This is the first allocation. Initiate the head and tail. */ 105 /* This is the first allocation. Initiate the head and tail. */
105 if (hctx->ccid2hctx_seqbufc == 0) 106 if (hc->tx_seqbufc == 0)
106 hctx->ccid2hctx_seqh = hctx->ccid2hctx_seqt = seqp; 107 hc->tx_seqh = hc->tx_seqt = seqp;
107 else { 108 else {
108 /* link the existing list with the one we just created */ 109 /* link the existing list with the one we just created */
109 hctx->ccid2hctx_seqh->ccid2s_next = seqp; 110 hc->tx_seqh->ccid2s_next = seqp;
110 seqp->ccid2s_prev = hctx->ccid2hctx_seqh; 111 seqp->ccid2s_prev = hc->tx_seqh;
111 112
112 hctx->ccid2hctx_seqt->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1]; 113 hc->tx_seqt->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];
113 seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = hctx->ccid2hctx_seqt; 114 seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = hc->tx_seqt;
114 } 115 }
115 116
116 /* store the original pointer to the buffer so we can free it */ 117 /* store the original pointer to the buffer so we can free it */
117 hctx->ccid2hctx_seqbuf[hctx->ccid2hctx_seqbufc] = seqp; 118 hc->tx_seqbuf[hc->tx_seqbufc] = seqp;
118 hctx->ccid2hctx_seqbufc++; 119 hc->tx_seqbufc++;
119 120
120 return 0; 121 return 0;
121} 122}
122 123
123static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) 124static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
124{ 125{
125 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); 126 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
126 127
127 if (hctx->ccid2hctx_pipe < hctx->ccid2hctx_cwnd) 128 if (hc->tx_pipe < hc->tx_cwnd)
128 return 0; 129 return 0;
129 130
130 return 1; /* XXX CCID should dequeue when ready instead of polling */ 131 return 1; /* XXX CCID should dequeue when ready instead of polling */
@@ -133,7 +134,7 @@ static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
133static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val) 134static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
134{ 135{
135 struct dccp_sock *dp = dccp_sk(sk); 136 struct dccp_sock *dp = dccp_sk(sk);
136 u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->ccid2hctx_cwnd, 2); 137 u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2);
137 138
138 /* 139 /*
139 * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from 140 * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from
@@ -155,10 +156,10 @@ static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
155 dp->dccps_l_ack_ratio = val; 156 dp->dccps_l_ack_ratio = val;
156} 157}
157 158
158static void ccid2_change_srtt(struct ccid2_hc_tx_sock *hctx, long val) 159static void ccid2_change_srtt(struct ccid2_hc_tx_sock *hc, long val)
159{ 160{
160 ccid2_pr_debug("change SRTT to %ld\n", val); 161 ccid2_pr_debug("change SRTT to %ld\n", val);
161 hctx->ccid2hctx_srtt = val; 162 hc->tx_srtt = val;
162} 163}
163 164
164static void ccid2_start_rto_timer(struct sock *sk); 165static void ccid2_start_rto_timer(struct sock *sk);
@@ -166,45 +167,44 @@ static void ccid2_start_rto_timer(struct sock *sk);
166static void ccid2_hc_tx_rto_expire(unsigned long data) 167static void ccid2_hc_tx_rto_expire(unsigned long data)
167{ 168{
168 struct sock *sk = (struct sock *)data; 169 struct sock *sk = (struct sock *)data;
169 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); 170 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
170 long s; 171 long s;
171 172
172 bh_lock_sock(sk); 173 bh_lock_sock(sk);
173 if (sock_owned_by_user(sk)) { 174 if (sock_owned_by_user(sk)) {
174 sk_reset_timer(sk, &hctx->ccid2hctx_rtotimer, 175 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + HZ / 5);
175 jiffies + HZ / 5);
176 goto out; 176 goto out;
177 } 177 }
178 178
179 ccid2_pr_debug("RTO_EXPIRE\n"); 179 ccid2_pr_debug("RTO_EXPIRE\n");
180 180
181 ccid2_hc_tx_check_sanity(hctx); 181 ccid2_hc_tx_check_sanity(hc);
182 182
183 /* back-off timer */ 183 /* back-off timer */
184 hctx->ccid2hctx_rto <<= 1; 184 hc->tx_rto <<= 1;
185 185
186 s = hctx->ccid2hctx_rto / HZ; 186 s = hc->tx_rto / HZ;
187 if (s > 60) 187 if (s > 60)
188 hctx->ccid2hctx_rto = 60 * HZ; 188 hc->tx_rto = 60 * HZ;
189 189
190 ccid2_start_rto_timer(sk); 190 ccid2_start_rto_timer(sk);
191 191
192 /* adjust pipe, cwnd etc */ 192 /* adjust pipe, cwnd etc */
193 hctx->ccid2hctx_ssthresh = hctx->ccid2hctx_cwnd / 2; 193 hc->tx_ssthresh = hc->tx_cwnd / 2;
194 if (hctx->ccid2hctx_ssthresh < 2) 194 if (hc->tx_ssthresh < 2)
195 hctx->ccid2hctx_ssthresh = 2; 195 hc->tx_ssthresh = 2;
196 hctx->ccid2hctx_cwnd = 1; 196 hc->tx_cwnd = 1;
197 hctx->ccid2hctx_pipe = 0; 197 hc->tx_pipe = 0;
198 198
199 /* clear state about stuff we sent */ 199 /* clear state about stuff we sent */
200 hctx->ccid2hctx_seqt = hctx->ccid2hctx_seqh; 200 hc->tx_seqt = hc->tx_seqh;
201 hctx->ccid2hctx_packets_acked = 0; 201 hc->tx_packets_acked = 0;
202 202
203 /* clear ack ratio state. */ 203 /* clear ack ratio state. */
204 hctx->ccid2hctx_rpseq = 0; 204 hc->tx_rpseq = 0;
205 hctx->ccid2hctx_rpdupack = -1; 205 hc->tx_rpdupack = -1;
206 ccid2_change_l_ack_ratio(sk, 1); 206 ccid2_change_l_ack_ratio(sk, 1);
207 ccid2_hc_tx_check_sanity(hctx); 207 ccid2_hc_tx_check_sanity(hc);
208out: 208out:
209 bh_unlock_sock(sk); 209 bh_unlock_sock(sk);
210 sock_put(sk); 210 sock_put(sk);
@@ -212,42 +212,40 @@ out:
212 212
213static void ccid2_start_rto_timer(struct sock *sk) 213static void ccid2_start_rto_timer(struct sock *sk)
214{ 214{
215 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); 215 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
216 216
217 ccid2_pr_debug("setting RTO timeout=%ld\n", hctx->ccid2hctx_rto); 217 ccid2_pr_debug("setting RTO timeout=%ld\n", hc->tx_rto);
218 218
219 BUG_ON(timer_pending(&hctx->ccid2hctx_rtotimer)); 219 BUG_ON(timer_pending(&hc->tx_rtotimer));
220 sk_reset_timer(sk, &hctx->ccid2hctx_rtotimer, 220 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
221 jiffies + hctx->ccid2hctx_rto);
222} 221}
223 222
224static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len) 223static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
225{ 224{
226 struct dccp_sock *dp = dccp_sk(sk); 225 struct dccp_sock *dp = dccp_sk(sk);
227 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); 226 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
228 struct ccid2_seq *next; 227 struct ccid2_seq *next;
229 228
230 hctx->ccid2hctx_pipe++; 229 hc->tx_pipe++;
231 230
232 hctx->ccid2hctx_seqh->ccid2s_seq = dp->dccps_gss; 231 hc->tx_seqh->ccid2s_seq = dp->dccps_gss;
233 hctx->ccid2hctx_seqh->ccid2s_acked = 0; 232 hc->tx_seqh->ccid2s_acked = 0;
234 hctx->ccid2hctx_seqh->ccid2s_sent = jiffies; 233 hc->tx_seqh->ccid2s_sent = jiffies;
235 234
236 next = hctx->ccid2hctx_seqh->ccid2s_next; 235 next = hc->tx_seqh->ccid2s_next;
237 /* check if we need to alloc more space */ 236 /* check if we need to alloc more space */
238 if (next == hctx->ccid2hctx_seqt) { 237 if (next == hc->tx_seqt) {
239 if (ccid2_hc_tx_alloc_seq(hctx)) { 238 if (ccid2_hc_tx_alloc_seq(hc)) {
240 DCCP_CRIT("packet history - out of memory!"); 239 DCCP_CRIT("packet history - out of memory!");
241 /* FIXME: find a more graceful way to bail out */ 240 /* FIXME: find a more graceful way to bail out */
242 return; 241 return;
243 } 242 }
244 next = hctx->ccid2hctx_seqh->ccid2s_next; 243 next = hc->tx_seqh->ccid2s_next;
245 BUG_ON(next == hctx->ccid2hctx_seqt); 244 BUG_ON(next == hc->tx_seqt);
246 } 245 }
247 hctx->ccid2hctx_seqh = next; 246 hc->tx_seqh = next;
248 247
249 ccid2_pr_debug("cwnd=%d pipe=%d\n", hctx->ccid2hctx_cwnd, 248 ccid2_pr_debug("cwnd=%d pipe=%d\n", hc->tx_cwnd, hc->tx_pipe);
250 hctx->ccid2hctx_pipe);
251 249
252 /* 250 /*
253 * FIXME: The code below is broken and the variables have been removed 251 * FIXME: The code below is broken and the variables have been removed
@@ -270,12 +268,12 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
270 */ 268 */
271#if 0 269#if 0
272 /* Ack Ratio. Need to maintain a concept of how many windows we sent */ 270 /* Ack Ratio. Need to maintain a concept of how many windows we sent */
273 hctx->ccid2hctx_arsent++; 271 hc->tx_arsent++;
274 /* We had an ack loss in this window... */ 272 /* We had an ack loss in this window... */
275 if (hctx->ccid2hctx_ackloss) { 273 if (hc->tx_ackloss) {
276 if (hctx->ccid2hctx_arsent >= hctx->ccid2hctx_cwnd) { 274 if (hc->tx_arsent >= hc->tx_cwnd) {
277 hctx->ccid2hctx_arsent = 0; 275 hc->tx_arsent = 0;
278 hctx->ccid2hctx_ackloss = 0; 276 hc->tx_ackloss = 0;
279 } 277 }
280 } else { 278 } else {
281 /* No acks lost up to now... */ 279 /* No acks lost up to now... */
@@ -285,28 +283,28 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
285 int denom = dp->dccps_l_ack_ratio * dp->dccps_l_ack_ratio - 283 int denom = dp->dccps_l_ack_ratio * dp->dccps_l_ack_ratio -
286 dp->dccps_l_ack_ratio; 284 dp->dccps_l_ack_ratio;
287 285
288 denom = hctx->ccid2hctx_cwnd * hctx->ccid2hctx_cwnd / denom; 286 denom = hc->tx_cwnd * hc->tx_cwnd / denom;
289 287
290 if (hctx->ccid2hctx_arsent >= denom) { 288 if (hc->tx_arsent >= denom) {
291 ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio - 1); 289 ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio - 1);
292 hctx->ccid2hctx_arsent = 0; 290 hc->tx_arsent = 0;
293 } 291 }
294 } else { 292 } else {
295 /* we can't increase ack ratio further [1] */ 293 /* we can't increase ack ratio further [1] */
296 hctx->ccid2hctx_arsent = 0; /* or maybe set it to cwnd*/ 294 hc->tx_arsent = 0; /* or maybe set it to cwnd*/
297 } 295 }
298 } 296 }
299#endif 297#endif
300 298
301 /* setup RTO timer */ 299 /* setup RTO timer */
302 if (!timer_pending(&hctx->ccid2hctx_rtotimer)) 300 if (!timer_pending(&hc->tx_rtotimer))
303 ccid2_start_rto_timer(sk); 301 ccid2_start_rto_timer(sk);
304 302
305#ifdef CONFIG_IP_DCCP_CCID2_DEBUG 303#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
306 do { 304 do {
307 struct ccid2_seq *seqp = hctx->ccid2hctx_seqt; 305 struct ccid2_seq *seqp = hc->tx_seqt;
308 306
309 while (seqp != hctx->ccid2hctx_seqh) { 307 while (seqp != hc->tx_seqh) {
310 ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n", 308 ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n",
311 (unsigned long long)seqp->ccid2s_seq, 309 (unsigned long long)seqp->ccid2s_seq,
312 seqp->ccid2s_acked, seqp->ccid2s_sent); 310 seqp->ccid2s_acked, seqp->ccid2s_sent);
@@ -314,7 +312,7 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
314 } 312 }
315 } while (0); 313 } while (0);
316 ccid2_pr_debug("=========\n"); 314 ccid2_pr_debug("=========\n");
317 ccid2_hc_tx_check_sanity(hctx); 315 ccid2_hc_tx_check_sanity(hc);
318#endif 316#endif
319} 317}
320 318
@@ -382,9 +380,9 @@ out_invalid_option:
382 380
383static void ccid2_hc_tx_kill_rto_timer(struct sock *sk) 381static void ccid2_hc_tx_kill_rto_timer(struct sock *sk)
384{ 382{
385 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); 383 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
386 384
387 sk_stop_timer(sk, &hctx->ccid2hctx_rtotimer); 385 sk_stop_timer(sk, &hc->tx_rtotimer);
388 ccid2_pr_debug("deleted RTO timer\n"); 386 ccid2_pr_debug("deleted RTO timer\n");
389} 387}
390 388
@@ -392,75 +390,75 @@ static inline void ccid2_new_ack(struct sock *sk,
392 struct ccid2_seq *seqp, 390 struct ccid2_seq *seqp,
393 unsigned int *maxincr) 391 unsigned int *maxincr)
394{ 392{
395 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); 393 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
396 394
397 if (hctx->ccid2hctx_cwnd < hctx->ccid2hctx_ssthresh) { 395 if (hc->tx_cwnd < hc->tx_ssthresh) {
398 if (*maxincr > 0 && ++hctx->ccid2hctx_packets_acked == 2) { 396 if (*maxincr > 0 && ++hc->tx_packets_acked == 2) {
399 hctx->ccid2hctx_cwnd += 1; 397 hc->tx_cwnd += 1;
400 *maxincr -= 1; 398 *maxincr -= 1;
401 hctx->ccid2hctx_packets_acked = 0; 399 hc->tx_packets_acked = 0;
402 } 400 }
403 } else if (++hctx->ccid2hctx_packets_acked >= hctx->ccid2hctx_cwnd) { 401 } else if (++hc->tx_packets_acked >= hc->tx_cwnd) {
404 hctx->ccid2hctx_cwnd += 1; 402 hc->tx_cwnd += 1;
405 hctx->ccid2hctx_packets_acked = 0; 403 hc->tx_packets_acked = 0;
406 } 404 }
407 405
408 /* update RTO */ 406 /* update RTO */
409 if (hctx->ccid2hctx_srtt == -1 || 407 if (hc->tx_srtt == -1 ||
410 time_after(jiffies, hctx->ccid2hctx_lastrtt + hctx->ccid2hctx_srtt)) { 408 time_after(jiffies, hc->tx_lastrtt + hc->tx_srtt)) {
411 unsigned long r = (long)jiffies - (long)seqp->ccid2s_sent; 409 unsigned long r = (long)jiffies - (long)seqp->ccid2s_sent;
412 int s; 410 int s;
413 411
414 /* first measurement */ 412 /* first measurement */
415 if (hctx->ccid2hctx_srtt == -1) { 413 if (hc->tx_srtt == -1) {
416 ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n", 414 ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n",
417 r, jiffies, 415 r, jiffies,
418 (unsigned long long)seqp->ccid2s_seq); 416 (unsigned long long)seqp->ccid2s_seq);
419 ccid2_change_srtt(hctx, r); 417 ccid2_change_srtt(hc, r);
420 hctx->ccid2hctx_rttvar = r >> 1; 418 hc->tx_rttvar = r >> 1;
421 } else { 419 } else {
422 /* RTTVAR */ 420 /* RTTVAR */
423 long tmp = hctx->ccid2hctx_srtt - r; 421 long tmp = hc->tx_srtt - r;
424 long srtt; 422 long srtt;
425 423
426 if (tmp < 0) 424 if (tmp < 0)
427 tmp *= -1; 425 tmp *= -1;
428 426
429 tmp >>= 2; 427 tmp >>= 2;
430 hctx->ccid2hctx_rttvar *= 3; 428 hc->tx_rttvar *= 3;
431 hctx->ccid2hctx_rttvar >>= 2; 429 hc->tx_rttvar >>= 2;
432 hctx->ccid2hctx_rttvar += tmp; 430 hc->tx_rttvar += tmp;
433 431
434 /* SRTT */ 432 /* SRTT */
435 srtt = hctx->ccid2hctx_srtt; 433 srtt = hc->tx_srtt;
436 srtt *= 7; 434 srtt *= 7;
437 srtt >>= 3; 435 srtt >>= 3;
438 tmp = r >> 3; 436 tmp = r >> 3;
439 srtt += tmp; 437 srtt += tmp;
440 ccid2_change_srtt(hctx, srtt); 438 ccid2_change_srtt(hc, srtt);
441 } 439 }
442 s = hctx->ccid2hctx_rttvar << 2; 440 s = hc->tx_rttvar << 2;
443 /* clock granularity is 1 when based on jiffies */ 441 /* clock granularity is 1 when based on jiffies */
444 if (!s) 442 if (!s)
445 s = 1; 443 s = 1;
446 hctx->ccid2hctx_rto = hctx->ccid2hctx_srtt + s; 444 hc->tx_rto = hc->tx_srtt + s;
447 445
448 /* must be at least a second */ 446 /* must be at least a second */
449 s = hctx->ccid2hctx_rto / HZ; 447 s = hc->tx_rto / HZ;
450 /* DCCP doesn't require this [but I like it cuz my code sux] */ 448 /* DCCP doesn't require this [but I like it cuz my code sux] */
451#if 1 449#if 1
452 if (s < 1) 450 if (s < 1)
453 hctx->ccid2hctx_rto = HZ; 451 hc->tx_rto = HZ;
454#endif 452#endif
455 /* max 60 seconds */ 453 /* max 60 seconds */
456 if (s > 60) 454 if (s > 60)
457 hctx->ccid2hctx_rto = HZ * 60; 455 hc->tx_rto = HZ * 60;
458 456
459 hctx->ccid2hctx_lastrtt = jiffies; 457 hc->tx_lastrtt = jiffies;
460 458
461 ccid2_pr_debug("srtt: %ld rttvar: %ld rto: %ld (HZ=%d) R=%lu\n", 459 ccid2_pr_debug("srtt: %ld rttvar: %ld rto: %ld (HZ=%d) R=%lu\n",
462 hctx->ccid2hctx_srtt, hctx->ccid2hctx_rttvar, 460 hc->tx_srtt, hc->tx_rttvar,
463 hctx->ccid2hctx_rto, HZ, r); 461 hc->tx_rto, HZ, r);
464 } 462 }
465 463
466 /* we got a new ack, so re-start RTO timer */ 464 /* we got a new ack, so re-start RTO timer */
@@ -470,40 +468,40 @@ static inline void ccid2_new_ack(struct sock *sk,
470 468
471static void ccid2_hc_tx_dec_pipe(struct sock *sk) 469static void ccid2_hc_tx_dec_pipe(struct sock *sk)
472{ 470{
473 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); 471 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
474 472
475 if (hctx->ccid2hctx_pipe == 0) 473 if (hc->tx_pipe == 0)
476 DCCP_BUG("pipe == 0"); 474 DCCP_BUG("pipe == 0");
477 else 475 else
478 hctx->ccid2hctx_pipe--; 476 hc->tx_pipe--;
479 477
480 if (hctx->ccid2hctx_pipe == 0) 478 if (hc->tx_pipe == 0)
481 ccid2_hc_tx_kill_rto_timer(sk); 479 ccid2_hc_tx_kill_rto_timer(sk);
482} 480}
483 481
484static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp) 482static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
485{ 483{
486 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); 484 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
487 485
488 if (time_before(seqp->ccid2s_sent, hctx->ccid2hctx_last_cong)) { 486 if (time_before(seqp->ccid2s_sent, hc->tx_last_cong)) {
489 ccid2_pr_debug("Multiple losses in an RTT---treating as one\n"); 487 ccid2_pr_debug("Multiple losses in an RTT---treating as one\n");
490 return; 488 return;
491 } 489 }
492 490
493 hctx->ccid2hctx_last_cong = jiffies; 491 hc->tx_last_cong = jiffies;
494 492
495 hctx->ccid2hctx_cwnd = hctx->ccid2hctx_cwnd / 2 ? : 1U; 493 hc->tx_cwnd = hc->tx_cwnd / 2 ? : 1U;
496 hctx->ccid2hctx_ssthresh = max(hctx->ccid2hctx_cwnd, 2U); 494 hc->tx_ssthresh = max(hc->tx_cwnd, 2U);
497 495
498 /* Avoid spurious timeouts resulting from Ack Ratio > cwnd */ 496 /* Avoid spurious timeouts resulting from Ack Ratio > cwnd */
499 if (dccp_sk(sk)->dccps_l_ack_ratio > hctx->ccid2hctx_cwnd) 497 if (dccp_sk(sk)->dccps_l_ack_ratio > hc->tx_cwnd)
500 ccid2_change_l_ack_ratio(sk, hctx->ccid2hctx_cwnd); 498 ccid2_change_l_ack_ratio(sk, hc->tx_cwnd);
501} 499}
502 500
503static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) 501static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
504{ 502{
505 struct dccp_sock *dp = dccp_sk(sk); 503 struct dccp_sock *dp = dccp_sk(sk);
506 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); 504 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
507 u64 ackno, seqno; 505 u64 ackno, seqno;
508 struct ccid2_seq *seqp; 506 struct ccid2_seq *seqp;
509 unsigned char *vector; 507 unsigned char *vector;
@@ -512,7 +510,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
512 int done = 0; 510 int done = 0;
513 unsigned int maxincr = 0; 511 unsigned int maxincr = 0;
514 512
515 ccid2_hc_tx_check_sanity(hctx); 513 ccid2_hc_tx_check_sanity(hc);
516 /* check reverse path congestion */ 514 /* check reverse path congestion */
517 seqno = DCCP_SKB_CB(skb)->dccpd_seq; 515 seqno = DCCP_SKB_CB(skb)->dccpd_seq;
518 516
@@ -521,21 +519,21 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
521 * -sorbo. 519 * -sorbo.
522 */ 520 */
523 /* need to bootstrap */ 521 /* need to bootstrap */
524 if (hctx->ccid2hctx_rpdupack == -1) { 522 if (hc->tx_rpdupack == -1) {
525 hctx->ccid2hctx_rpdupack = 0; 523 hc->tx_rpdupack = 0;
526 hctx->ccid2hctx_rpseq = seqno; 524 hc->tx_rpseq = seqno;
527 } else { 525 } else {
528 /* check if packet is consecutive */ 526 /* check if packet is consecutive */
529 if (dccp_delta_seqno(hctx->ccid2hctx_rpseq, seqno) == 1) 527 if (dccp_delta_seqno(hc->tx_rpseq, seqno) == 1)
530 hctx->ccid2hctx_rpseq = seqno; 528 hc->tx_rpseq = seqno;
531 /* it's a later packet */ 529 /* it's a later packet */
532 else if (after48(seqno, hctx->ccid2hctx_rpseq)) { 530 else if (after48(seqno, hc->tx_rpseq)) {
533 hctx->ccid2hctx_rpdupack++; 531 hc->tx_rpdupack++;
534 532
535 /* check if we got enough dupacks */ 533 /* check if we got enough dupacks */
536 if (hctx->ccid2hctx_rpdupack >= NUMDUPACK) { 534 if (hc->tx_rpdupack >= NUMDUPACK) {
537 hctx->ccid2hctx_rpdupack = -1; /* XXX lame */ 535 hc->tx_rpdupack = -1; /* XXX lame */
538 hctx->ccid2hctx_rpseq = 0; 536 hc->tx_rpseq = 0;
539 537
540 ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio); 538 ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio);
541 } 539 }
@@ -544,7 +542,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
544 542
545 /* check forward path congestion */ 543 /* check forward path congestion */
546 /* still didn't send out new data packets */ 544 /* still didn't send out new data packets */
547 if (hctx->ccid2hctx_seqh == hctx->ccid2hctx_seqt) 545 if (hc->tx_seqh == hc->tx_seqt)
548 return; 546 return;
549 547
550 switch (DCCP_SKB_CB(skb)->dccpd_type) { 548 switch (DCCP_SKB_CB(skb)->dccpd_type) {
@@ -556,14 +554,14 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
556 } 554 }
557 555
558 ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq; 556 ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
559 if (after48(ackno, hctx->ccid2hctx_high_ack)) 557 if (after48(ackno, hc->tx_high_ack))
560 hctx->ccid2hctx_high_ack = ackno; 558 hc->tx_high_ack = ackno;
561 559
562 seqp = hctx->ccid2hctx_seqt; 560 seqp = hc->tx_seqt;
563 while (before48(seqp->ccid2s_seq, ackno)) { 561 while (before48(seqp->ccid2s_seq, ackno)) {
564 seqp = seqp->ccid2s_next; 562 seqp = seqp->ccid2s_next;
565 if (seqp == hctx->ccid2hctx_seqh) { 563 if (seqp == hc->tx_seqh) {
566 seqp = hctx->ccid2hctx_seqh->ccid2s_prev; 564 seqp = hc->tx_seqh->ccid2s_prev;
567 break; 565 break;
568 } 566 }
569 } 567 }
@@ -573,7 +571,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
573 * packets per acknowledgement. Rounding up avoids that cwnd is not 571 * packets per acknowledgement. Rounding up avoids that cwnd is not
574 * advanced when Ack Ratio is 1 and gives a slight edge otherwise. 572 * advanced when Ack Ratio is 1 and gives a slight edge otherwise.
575 */ 573 */
576 if (hctx->ccid2hctx_cwnd < hctx->ccid2hctx_ssthresh) 574 if (hc->tx_cwnd < hc->tx_ssthresh)
577 maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2); 575 maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2);
578 576
579 /* go through all ack vectors */ 577 /* go through all ack vectors */
@@ -592,7 +590,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
592 * seqnos. 590 * seqnos.
593 */ 591 */
594 while (after48(seqp->ccid2s_seq, ackno)) { 592 while (after48(seqp->ccid2s_seq, ackno)) {
595 if (seqp == hctx->ccid2hctx_seqt) { 593 if (seqp == hc->tx_seqt) {
596 done = 1; 594 done = 1;
597 break; 595 break;
598 } 596 }
@@ -624,7 +622,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
624 (unsigned long long)seqp->ccid2s_seq); 622 (unsigned long long)seqp->ccid2s_seq);
625 ccid2_hc_tx_dec_pipe(sk); 623 ccid2_hc_tx_dec_pipe(sk);
626 } 624 }
627 if (seqp == hctx->ccid2hctx_seqt) { 625 if (seqp == hc->tx_seqt) {
628 done = 1; 626 done = 1;
629 break; 627 break;
630 } 628 }
@@ -643,11 +641,11 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
643 /* The state about what is acked should be correct now 641 /* The state about what is acked should be correct now
644 * Check for NUMDUPACK 642 * Check for NUMDUPACK
645 */ 643 */
646 seqp = hctx->ccid2hctx_seqt; 644 seqp = hc->tx_seqt;
647 while (before48(seqp->ccid2s_seq, hctx->ccid2hctx_high_ack)) { 645 while (before48(seqp->ccid2s_seq, hc->tx_high_ack)) {
648 seqp = seqp->ccid2s_next; 646 seqp = seqp->ccid2s_next;
649 if (seqp == hctx->ccid2hctx_seqh) { 647 if (seqp == hc->tx_seqh) {
650 seqp = hctx->ccid2hctx_seqh->ccid2s_prev; 648 seqp = hc->tx_seqh->ccid2s_prev;
651 break; 649 break;
652 } 650 }
653 } 651 }
@@ -658,7 +656,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
658 if (done == NUMDUPACK) 656 if (done == NUMDUPACK)
659 break; 657 break;
660 } 658 }
661 if (seqp == hctx->ccid2hctx_seqt) 659 if (seqp == hc->tx_seqt)
662 break; 660 break;
663 seqp = seqp->ccid2s_prev; 661 seqp = seqp->ccid2s_prev;
664 } 662 }
@@ -681,86 +679,86 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
681 ccid2_congestion_event(sk, seqp); 679 ccid2_congestion_event(sk, seqp);
682 ccid2_hc_tx_dec_pipe(sk); 680 ccid2_hc_tx_dec_pipe(sk);
683 } 681 }
684 if (seqp == hctx->ccid2hctx_seqt) 682 if (seqp == hc->tx_seqt)
685 break; 683 break;
686 seqp = seqp->ccid2s_prev; 684 seqp = seqp->ccid2s_prev;
687 } 685 }
688 686
689 hctx->ccid2hctx_seqt = last_acked; 687 hc->tx_seqt = last_acked;
690 } 688 }
691 689
692 /* trim acked packets in tail */ 690 /* trim acked packets in tail */
693 while (hctx->ccid2hctx_seqt != hctx->ccid2hctx_seqh) { 691 while (hc->tx_seqt != hc->tx_seqh) {
694 if (!hctx->ccid2hctx_seqt->ccid2s_acked) 692 if (!hc->tx_seqt->ccid2s_acked)
695 break; 693 break;
696 694
697 hctx->ccid2hctx_seqt = hctx->ccid2hctx_seqt->ccid2s_next; 695 hc->tx_seqt = hc->tx_seqt->ccid2s_next;
698 } 696 }
699 697
700 ccid2_hc_tx_check_sanity(hctx); 698 ccid2_hc_tx_check_sanity(hc);
701} 699}
702 700
703static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk) 701static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
704{ 702{
705 struct ccid2_hc_tx_sock *hctx = ccid_priv(ccid); 703 struct ccid2_hc_tx_sock *hc = ccid_priv(ccid);
706 struct dccp_sock *dp = dccp_sk(sk); 704 struct dccp_sock *dp = dccp_sk(sk);
707 u32 max_ratio; 705 u32 max_ratio;
708 706
709 /* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */ 707 /* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */
710 hctx->ccid2hctx_ssthresh = ~0U; 708 hc->tx_ssthresh = ~0U;
711 709
712 /* 710 /*
713 * RFC 4341, 5: "The cwnd parameter is initialized to at most four 711 * RFC 4341, 5: "The cwnd parameter is initialized to at most four
714 * packets for new connections, following the rules from [RFC3390]". 712 * packets for new connections, following the rules from [RFC3390]".
715 * We need to convert the bytes of RFC3390 into the packets of RFC 4341. 713 * We need to convert the bytes of RFC3390 into the packets of RFC 4341.
716 */ 714 */
717 hctx->ccid2hctx_cwnd = clamp(4380U / dp->dccps_mss_cache, 2U, 4U); 715 hc->tx_cwnd = clamp(4380U / dp->dccps_mss_cache, 2U, 4U);
718 716
719 /* Make sure that Ack Ratio is enabled and within bounds. */ 717 /* Make sure that Ack Ratio is enabled and within bounds. */
720 max_ratio = DIV_ROUND_UP(hctx->ccid2hctx_cwnd, 2); 718 max_ratio = DIV_ROUND_UP(hc->tx_cwnd, 2);
721 if (dp->dccps_l_ack_ratio == 0 || dp->dccps_l_ack_ratio > max_ratio) 719 if (dp->dccps_l_ack_ratio == 0 || dp->dccps_l_ack_ratio > max_ratio)
722 dp->dccps_l_ack_ratio = max_ratio; 720 dp->dccps_l_ack_ratio = max_ratio;
723 721
724 /* XXX init ~ to window size... */ 722 /* XXX init ~ to window size... */
725 if (ccid2_hc_tx_alloc_seq(hctx)) 723 if (ccid2_hc_tx_alloc_seq(hc))
726 return -ENOMEM; 724 return -ENOMEM;
727 725
728 hctx->ccid2hctx_rto = 3 * HZ; 726 hc->tx_rto = 3 * HZ;
729 ccid2_change_srtt(hctx, -1); 727 ccid2_change_srtt(hc, -1);
730 hctx->ccid2hctx_rttvar = -1; 728 hc->tx_rttvar = -1;
731 hctx->ccid2hctx_rpdupack = -1; 729 hc->tx_rpdupack = -1;
732 hctx->ccid2hctx_last_cong = jiffies; 730 hc->tx_last_cong = jiffies;
733 setup_timer(&hctx->ccid2hctx_rtotimer, ccid2_hc_tx_rto_expire, 731 setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire,
734 (unsigned long)sk); 732 (unsigned long)sk);
735 733
736 ccid2_hc_tx_check_sanity(hctx); 734 ccid2_hc_tx_check_sanity(hc);
737 return 0; 735 return 0;
738} 736}
739 737
740static void ccid2_hc_tx_exit(struct sock *sk) 738static void ccid2_hc_tx_exit(struct sock *sk)
741{ 739{
742 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); 740 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
743 int i; 741 int i;
744 742
745 ccid2_hc_tx_kill_rto_timer(sk); 743 ccid2_hc_tx_kill_rto_timer(sk);
746 744
747 for (i = 0; i < hctx->ccid2hctx_seqbufc; i++) 745 for (i = 0; i < hc->tx_seqbufc; i++)
748 kfree(hctx->ccid2hctx_seqbuf[i]); 746 kfree(hc->tx_seqbuf[i]);
749 hctx->ccid2hctx_seqbufc = 0; 747 hc->tx_seqbufc = 0;
750} 748}
751 749
752static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) 750static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
753{ 751{
754 const struct dccp_sock *dp = dccp_sk(sk); 752 const struct dccp_sock *dp = dccp_sk(sk);
755 struct ccid2_hc_rx_sock *hcrx = ccid2_hc_rx_sk(sk); 753 struct ccid2_hc_rx_sock *hc = ccid2_hc_rx_sk(sk);
756 754
757 switch (DCCP_SKB_CB(skb)->dccpd_type) { 755 switch (DCCP_SKB_CB(skb)->dccpd_type) {
758 case DCCP_PKT_DATA: 756 case DCCP_PKT_DATA:
759 case DCCP_PKT_DATAACK: 757 case DCCP_PKT_DATAACK:
760 hcrx->ccid2hcrx_data++; 758 hc->rx_data++;
761 if (hcrx->ccid2hcrx_data >= dp->dccps_r_ack_ratio) { 759 if (hc->rx_data >= dp->dccps_r_ack_ratio) {
762 dccp_send_ack(sk); 760 dccp_send_ack(sk);
763 hcrx->ccid2hcrx_data = 0; 761 hc->rx_data = 0;
764 } 762 }
765 break; 763 break;
766 } 764 }
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
index 326ac90fb909..1ec6a30103bb 100644
--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h
@@ -40,34 +40,34 @@ struct ccid2_seq {
40 40
41/** 41/**
42 * struct ccid2_hc_tx_sock - CCID2 TX half connection 42 * struct ccid2_hc_tx_sock - CCID2 TX half connection
43 * @ccid2hctx_{cwnd,ssthresh,pipe}: as per RFC 4341, section 5 43 * @tx_{cwnd,ssthresh,pipe}: as per RFC 4341, section 5
44 * @ccid2hctx_packets_acked - Ack counter for deriving cwnd growth (RFC 3465) 44 * @tx_packets_acked: Ack counter for deriving cwnd growth (RFC 3465)
45 * @ccid2hctx_lastrtt -time RTT was last measured 45 * @tx_lastrtt: time RTT was last measured
46 * @ccid2hctx_rpseq - last consecutive seqno 46 * @tx_rpseq: last consecutive seqno
47 * @ccid2hctx_rpdupack - dupacks since rpseq 47 * @tx_rpdupack: dupacks since rpseq
48 */ 48 */
49struct ccid2_hc_tx_sock { 49struct ccid2_hc_tx_sock {
50 u32 ccid2hctx_cwnd; 50 u32 tx_cwnd;
51 u32 ccid2hctx_ssthresh; 51 u32 tx_ssthresh;
52 u32 ccid2hctx_pipe; 52 u32 tx_pipe;
53 u32 ccid2hctx_packets_acked; 53 u32 tx_packets_acked;
54 struct ccid2_seq *ccid2hctx_seqbuf[CCID2_SEQBUF_MAX]; 54 struct ccid2_seq *tx_seqbuf[CCID2_SEQBUF_MAX];
55 int ccid2hctx_seqbufc; 55 int tx_seqbufc;
56 struct ccid2_seq *ccid2hctx_seqh; 56 struct ccid2_seq *tx_seqh;
57 struct ccid2_seq *ccid2hctx_seqt; 57 struct ccid2_seq *tx_seqt;
58 long ccid2hctx_rto; 58 long tx_rto;
59 long ccid2hctx_srtt; 59 long tx_srtt;
60 long ccid2hctx_rttvar; 60 long tx_rttvar;
61 unsigned long ccid2hctx_lastrtt; 61 unsigned long tx_lastrtt;
62 struct timer_list ccid2hctx_rtotimer; 62 struct timer_list tx_rtotimer;
63 u64 ccid2hctx_rpseq; 63 u64 tx_rpseq;
64 int ccid2hctx_rpdupack; 64 int tx_rpdupack;
65 unsigned long ccid2hctx_last_cong; 65 unsigned long tx_last_cong;
66 u64 ccid2hctx_high_ack; 66 u64 tx_high_ack;
67}; 67};
68 68
69struct ccid2_hc_rx_sock { 69struct ccid2_hc_rx_sock {
70 int ccid2hcrx_data; 70 int rx_data;
71}; 71};
72 72
73static inline struct ccid2_hc_tx_sock *ccid2_hc_tx_sk(const struct sock *sk) 73static inline struct ccid2_hc_tx_sock *ccid2_hc_tx_sk(const struct sock *sk)
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 34dcc798c457..bcd7632299f5 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -64,14 +64,14 @@ static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state)
64static void ccid3_hc_tx_set_state(struct sock *sk, 64static void ccid3_hc_tx_set_state(struct sock *sk,
65 enum ccid3_hc_tx_states state) 65 enum ccid3_hc_tx_states state)
66{ 66{
67 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); 67 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
68 enum ccid3_hc_tx_states oldstate = hctx->ccid3hctx_state; 68 enum ccid3_hc_tx_states oldstate = hc->tx_state;
69 69
70 ccid3_pr_debug("%s(%p) %-8.8s -> %s\n", 70 ccid3_pr_debug("%s(%p) %-8.8s -> %s\n",
71 dccp_role(sk), sk, ccid3_tx_state_name(oldstate), 71 dccp_role(sk), sk, ccid3_tx_state_name(oldstate),
72 ccid3_tx_state_name(state)); 72 ccid3_tx_state_name(state));
73 WARN_ON(state == oldstate); 73 WARN_ON(state == oldstate);
74 hctx->ccid3hctx_state = state; 74 hc->tx_state = state;
75} 75}
76 76
77/* 77/*
@@ -85,37 +85,32 @@ static void ccid3_hc_tx_set_state(struct sock *sk,
85 */ 85 */
86static inline u64 rfc3390_initial_rate(struct sock *sk) 86static inline u64 rfc3390_initial_rate(struct sock *sk)
87{ 87{
88 const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); 88 const struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
89 const __u32 w_init = clamp_t(__u32, 4380U, 89 const __u32 w_init = clamp_t(__u32, 4380U, 2 * hc->tx_s, 4 * hc->tx_s);
90 2 * hctx->ccid3hctx_s, 4 * hctx->ccid3hctx_s);
91 90
92 return scaled_div(w_init << 6, hctx->ccid3hctx_rtt); 91 return scaled_div(w_init << 6, hc->tx_rtt);
93} 92}
94 93
95/* 94/*
96 * Recalculate t_ipi and delta (should be called whenever X changes) 95 * Recalculate t_ipi and delta (should be called whenever X changes)
97 */ 96 */
98static void ccid3_update_send_interval(struct ccid3_hc_tx_sock *hctx) 97static void ccid3_update_send_interval(struct ccid3_hc_tx_sock *hc)
99{ 98{
100 /* Calculate new t_ipi = s / X_inst (X_inst is in 64 * bytes/second) */ 99 /* Calculate new t_ipi = s / X_inst (X_inst is in 64 * bytes/second) */
101 hctx->ccid3hctx_t_ipi = scaled_div32(((u64)hctx->ccid3hctx_s) << 6, 100 hc->tx_t_ipi = scaled_div32(((u64)hc->tx_s) << 6, hc->tx_x);
102 hctx->ccid3hctx_x);
103 101
104 /* Calculate new delta by delta = min(t_ipi / 2, t_gran / 2) */ 102 /* Calculate new delta by delta = min(t_ipi / 2, t_gran / 2) */
105 hctx->ccid3hctx_delta = min_t(u32, hctx->ccid3hctx_t_ipi / 2, 103 hc->tx_delta = min_t(u32, hc->tx_t_ipi / 2, TFRC_OPSYS_HALF_TIME_GRAN);
106 TFRC_OPSYS_HALF_TIME_GRAN);
107
108 ccid3_pr_debug("t_ipi=%u, delta=%u, s=%u, X=%u\n",
109 hctx->ccid3hctx_t_ipi, hctx->ccid3hctx_delta,
110 hctx->ccid3hctx_s, (unsigned)(hctx->ccid3hctx_x >> 6));
111 104
105 ccid3_pr_debug("t_ipi=%u, delta=%u, s=%u, X=%u\n", hc->tx_t_ipi,
106 hc->tx_delta, hc->tx_s, (unsigned)(hc->tx_x >> 6));
112} 107}
113 108
114static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hctx, ktime_t now) 109static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now)
115{ 110{
116 u32 delta = ktime_us_delta(now, hctx->ccid3hctx_t_last_win_count); 111 u32 delta = ktime_us_delta(now, hc->tx_t_last_win_count);
117 112
118 return delta / hctx->ccid3hctx_rtt; 113 return delta / hc->tx_rtt;
119} 114}
120 115
121/** 116/**
@@ -130,9 +125,9 @@ static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hctx, ktime_t now)
130 */ 125 */
131static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp) 126static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp)
132{ 127{
133 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); 128 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
134 __u64 min_rate = 2 * hctx->ccid3hctx_x_recv; 129 __u64 min_rate = 2 * hc->tx_x_recv;
135 const __u64 old_x = hctx->ccid3hctx_x; 130 const __u64 old_x = hc->tx_x;
136 ktime_t now = stamp ? *stamp : ktime_get_real(); 131 ktime_t now = stamp ? *stamp : ktime_get_real();
137 132
138 /* 133 /*
@@ -141,37 +136,31 @@ static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp)
141 * a sender is idle if it has not sent anything over a 2-RTT-period. 136 * a sender is idle if it has not sent anything over a 2-RTT-period.
142 * For consistency with X and X_recv, min_rate is also scaled by 2^6. 137 * For consistency with X and X_recv, min_rate is also scaled by 2^6.
143 */ 138 */
144 if (ccid3_hc_tx_idle_rtt(hctx, now) >= 2) { 139 if (ccid3_hc_tx_idle_rtt(hc, now) >= 2) {
145 min_rate = rfc3390_initial_rate(sk); 140 min_rate = rfc3390_initial_rate(sk);
146 min_rate = max(min_rate, 2 * hctx->ccid3hctx_x_recv); 141 min_rate = max(min_rate, 2 * hc->tx_x_recv);
147 } 142 }
148 143
149 if (hctx->ccid3hctx_p > 0) { 144 if (hc->tx_p > 0) {
150 145
151 hctx->ccid3hctx_x = min(((__u64)hctx->ccid3hctx_x_calc) << 6, 146 hc->tx_x = min(((__u64)hc->tx_x_calc) << 6, min_rate);
152 min_rate); 147 hc->tx_x = max(hc->tx_x, (((__u64)hc->tx_s) << 6) / TFRC_T_MBI);
153 hctx->ccid3hctx_x = max(hctx->ccid3hctx_x,
154 (((__u64)hctx->ccid3hctx_s) << 6) /
155 TFRC_T_MBI);
156 148
157 } else if (ktime_us_delta(now, hctx->ccid3hctx_t_ld) 149 } else if (ktime_us_delta(now, hc->tx_t_ld) - (s64)hc->tx_rtt >= 0) {
158 - (s64)hctx->ccid3hctx_rtt >= 0) {
159 150
160 hctx->ccid3hctx_x = min(2 * hctx->ccid3hctx_x, min_rate); 151 hc->tx_x = min(2 * hc->tx_x, min_rate);
161 hctx->ccid3hctx_x = max(hctx->ccid3hctx_x, 152 hc->tx_x = max(hc->tx_x,
162 scaled_div(((__u64)hctx->ccid3hctx_s) << 6, 153 scaled_div(((__u64)hc->tx_s) << 6, hc->tx_rtt));
163 hctx->ccid3hctx_rtt)); 154 hc->tx_t_ld = now;
164 hctx->ccid3hctx_t_ld = now;
165 } 155 }
166 156
167 if (hctx->ccid3hctx_x != old_x) { 157 if (hc->tx_x != old_x) {
168 ccid3_pr_debug("X_prev=%u, X_now=%u, X_calc=%u, " 158 ccid3_pr_debug("X_prev=%u, X_now=%u, X_calc=%u, "
169 "X_recv=%u\n", (unsigned)(old_x >> 6), 159 "X_recv=%u\n", (unsigned)(old_x >> 6),
170 (unsigned)(hctx->ccid3hctx_x >> 6), 160 (unsigned)(hc->tx_x >> 6), hc->tx_x_calc,
171 hctx->ccid3hctx_x_calc, 161 (unsigned)(hc->tx_x_recv >> 6));
172 (unsigned)(hctx->ccid3hctx_x_recv >> 6));
173 162
174 ccid3_update_send_interval(hctx); 163 ccid3_update_send_interval(hc);
175 } 164 }
176} 165}
177 166
@@ -179,37 +168,37 @@ static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp)
179 * Track the mean packet size `s' (cf. RFC 4342, 5.3 and RFC 3448, 4.1) 168 * Track the mean packet size `s' (cf. RFC 4342, 5.3 and RFC 3448, 4.1)
180 * @len: DCCP packet payload size in bytes 169 * @len: DCCP packet payload size in bytes
181 */ 170 */
182static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hctx, int len) 171static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hc, int len)
183{ 172{
184 const u16 old_s = hctx->ccid3hctx_s; 173 const u16 old_s = hc->tx_s;
185 174
186 hctx->ccid3hctx_s = tfrc_ewma(hctx->ccid3hctx_s, len, 9); 175 hc->tx_s = tfrc_ewma(hc->tx_s, len, 9);
187 176
188 if (hctx->ccid3hctx_s != old_s) 177 if (hc->tx_s != old_s)
189 ccid3_update_send_interval(hctx); 178 ccid3_update_send_interval(hc);
190} 179}
191 180
192/* 181/*
193 * Update Window Counter using the algorithm from [RFC 4342, 8.1]. 182 * Update Window Counter using the algorithm from [RFC 4342, 8.1].
194 * As elsewhere, RTT > 0 is assumed by using dccp_sample_rtt(). 183 * As elsewhere, RTT > 0 is assumed by using dccp_sample_rtt().
195 */ 184 */
196static inline void ccid3_hc_tx_update_win_count(struct ccid3_hc_tx_sock *hctx, 185static inline void ccid3_hc_tx_update_win_count(struct ccid3_hc_tx_sock *hc,
197 ktime_t now) 186 ktime_t now)
198{ 187{
199 u32 delta = ktime_us_delta(now, hctx->ccid3hctx_t_last_win_count), 188 u32 delta = ktime_us_delta(now, hc->tx_t_last_win_count),
200 quarter_rtts = (4 * delta) / hctx->ccid3hctx_rtt; 189 quarter_rtts = (4 * delta) / hc->tx_rtt;
201 190
202 if (quarter_rtts > 0) { 191 if (quarter_rtts > 0) {
203 hctx->ccid3hctx_t_last_win_count = now; 192 hc->tx_t_last_win_count = now;
204 hctx->ccid3hctx_last_win_count += min(quarter_rtts, 5U); 193 hc->tx_last_win_count += min(quarter_rtts, 5U);
205 hctx->ccid3hctx_last_win_count &= 0xF; /* mod 16 */ 194 hc->tx_last_win_count &= 0xF; /* mod 16 */
206 } 195 }
207} 196}
208 197
209static void ccid3_hc_tx_no_feedback_timer(unsigned long data) 198static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
210{ 199{
211 struct sock *sk = (struct sock *)data; 200 struct sock *sk = (struct sock *)data;
212 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); 201 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
213 unsigned long t_nfb = USEC_PER_SEC / 5; 202 unsigned long t_nfb = USEC_PER_SEC / 5;
214 203
215 bh_lock_sock(sk); 204 bh_lock_sock(sk);
@@ -220,24 +209,23 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
220 } 209 }
221 210
222 ccid3_pr_debug("%s(%p, state=%s) - entry \n", dccp_role(sk), sk, 211 ccid3_pr_debug("%s(%p, state=%s) - entry \n", dccp_role(sk), sk,
223 ccid3_tx_state_name(hctx->ccid3hctx_state)); 212 ccid3_tx_state_name(hc->tx_state));
224 213
225 if (hctx->ccid3hctx_state == TFRC_SSTATE_FBACK) 214 if (hc->tx_state == TFRC_SSTATE_FBACK)
226 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK); 215 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
227 else if (hctx->ccid3hctx_state != TFRC_SSTATE_NO_FBACK) 216 else if (hc->tx_state != TFRC_SSTATE_NO_FBACK)
228 goto out; 217 goto out;
229 218
230 /* 219 /*
231 * Determine new allowed sending rate X as per draft rfc3448bis-00, 4.4 220 * Determine new allowed sending rate X as per draft rfc3448bis-00, 4.4
232 */ 221 */
233 if (hctx->ccid3hctx_t_rto == 0 || /* no feedback received yet */ 222 if (hc->tx_t_rto == 0 || /* no feedback received yet */
234 hctx->ccid3hctx_p == 0) { 223 hc->tx_p == 0) {
235 224
236 /* halve send rate directly */ 225 /* halve send rate directly */
237 hctx->ccid3hctx_x = max(hctx->ccid3hctx_x / 2, 226 hc->tx_x = max(hc->tx_x / 2,
238 (((__u64)hctx->ccid3hctx_s) << 6) / 227 (((__u64)hc->tx_s) << 6) / TFRC_T_MBI);
239 TFRC_T_MBI); 228 ccid3_update_send_interval(hc);
240 ccid3_update_send_interval(hctx);
241 } else { 229 } else {
242 /* 230 /*
243 * Modify the cached value of X_recv 231 * Modify the cached value of X_recv
@@ -249,33 +237,32 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
249 * 237 *
250 * Note that X_recv is scaled by 2^6 while X_calc is not 238 * Note that X_recv is scaled by 2^6 while X_calc is not
251 */ 239 */
252 BUG_ON(hctx->ccid3hctx_p && !hctx->ccid3hctx_x_calc); 240 BUG_ON(hc->tx_p && !hc->tx_x_calc);
253 241
254 if (hctx->ccid3hctx_x_calc > (hctx->ccid3hctx_x_recv >> 5)) 242 if (hc->tx_x_calc > (hc->tx_x_recv >> 5))
255 hctx->ccid3hctx_x_recv = 243 hc->tx_x_recv =
256 max(hctx->ccid3hctx_x_recv / 2, 244 max(hc->tx_x_recv / 2,
257 (((__u64)hctx->ccid3hctx_s) << 6) / 245 (((__u64)hc->tx_s) << 6) / (2*TFRC_T_MBI));
258 (2 * TFRC_T_MBI));
259 else { 246 else {
260 hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc; 247 hc->tx_x_recv = hc->tx_x_calc;
261 hctx->ccid3hctx_x_recv <<= 4; 248 hc->tx_x_recv <<= 4;
262 } 249 }
263 ccid3_hc_tx_update_x(sk, NULL); 250 ccid3_hc_tx_update_x(sk, NULL);
264 } 251 }
265 ccid3_pr_debug("Reduced X to %llu/64 bytes/sec\n", 252 ccid3_pr_debug("Reduced X to %llu/64 bytes/sec\n",
266 (unsigned long long)hctx->ccid3hctx_x); 253 (unsigned long long)hc->tx_x);
267 254
268 /* 255 /*
269 * Set new timeout for the nofeedback timer. 256 * Set new timeout for the nofeedback timer.
270 * See comments in packet_recv() regarding the value of t_RTO. 257 * See comments in packet_recv() regarding the value of t_RTO.
271 */ 258 */
272 if (unlikely(hctx->ccid3hctx_t_rto == 0)) /* no feedback yet */ 259 if (unlikely(hc->tx_t_rto == 0)) /* no feedback yet */
273 t_nfb = TFRC_INITIAL_TIMEOUT; 260 t_nfb = TFRC_INITIAL_TIMEOUT;
274 else 261 else
275 t_nfb = max(hctx->ccid3hctx_t_rto, 2 * hctx->ccid3hctx_t_ipi); 262 t_nfb = max(hc->tx_t_rto, 2 * hc->tx_t_ipi);
276 263
277restart_timer: 264restart_timer:
278 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, 265 sk_reset_timer(sk, &hc->tx_no_feedback_timer,
279 jiffies + usecs_to_jiffies(t_nfb)); 266 jiffies + usecs_to_jiffies(t_nfb));
280out: 267out:
281 bh_unlock_sock(sk); 268 bh_unlock_sock(sk);
@@ -291,7 +278,7 @@ out:
291static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) 278static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
292{ 279{
293 struct dccp_sock *dp = dccp_sk(sk); 280 struct dccp_sock *dp = dccp_sk(sk);
294 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); 281 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
295 ktime_t now = ktime_get_real(); 282 ktime_t now = ktime_get_real();
296 s64 delay; 283 s64 delay;
297 284
@@ -303,18 +290,17 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
303 if (unlikely(skb->len == 0)) 290 if (unlikely(skb->len == 0))
304 return -EBADMSG; 291 return -EBADMSG;
305 292
306 switch (hctx->ccid3hctx_state) { 293 switch (hc->tx_state) {
307 case TFRC_SSTATE_NO_SENT: 294 case TFRC_SSTATE_NO_SENT:
308 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, 295 sk_reset_timer(sk, &hc->tx_no_feedback_timer, (jiffies +
309 (jiffies + 296 usecs_to_jiffies(TFRC_INITIAL_TIMEOUT)));
310 usecs_to_jiffies(TFRC_INITIAL_TIMEOUT))); 297 hc->tx_last_win_count = 0;
311 hctx->ccid3hctx_last_win_count = 0; 298 hc->tx_t_last_win_count = now;
312 hctx->ccid3hctx_t_last_win_count = now;
313 299
314 /* Set t_0 for initial packet */ 300 /* Set t_0 for initial packet */
315 hctx->ccid3hctx_t_nom = now; 301 hc->tx_t_nom = now;
316 302
317 hctx->ccid3hctx_s = skb->len; 303 hc->tx_s = skb->len;
318 304
319 /* 305 /*
320 * Use initial RTT sample when available: recommended by erratum 306 * Use initial RTT sample when available: recommended by erratum
@@ -323,9 +309,9 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
323 */ 309 */
324 if (dp->dccps_syn_rtt) { 310 if (dp->dccps_syn_rtt) {
325 ccid3_pr_debug("SYN RTT = %uus\n", dp->dccps_syn_rtt); 311 ccid3_pr_debug("SYN RTT = %uus\n", dp->dccps_syn_rtt);
326 hctx->ccid3hctx_rtt = dp->dccps_syn_rtt; 312 hc->tx_rtt = dp->dccps_syn_rtt;
327 hctx->ccid3hctx_x = rfc3390_initial_rate(sk); 313 hc->tx_x = rfc3390_initial_rate(sk);
328 hctx->ccid3hctx_t_ld = now; 314 hc->tx_t_ld = now;
329 } else { 315 } else {
330 /* 316 /*
331 * Sender does not have RTT sample: 317 * Sender does not have RTT sample:
@@ -333,17 +319,17 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
333 * is needed in several parts (e.g. window counter); 319 * is needed in several parts (e.g. window counter);
334 * - set sending rate X_pps = 1pps as per RFC 3448, 4.2. 320 * - set sending rate X_pps = 1pps as per RFC 3448, 4.2.
335 */ 321 */
336 hctx->ccid3hctx_rtt = DCCP_FALLBACK_RTT; 322 hc->tx_rtt = DCCP_FALLBACK_RTT;
337 hctx->ccid3hctx_x = hctx->ccid3hctx_s; 323 hc->tx_x = hc->tx_s;
338 hctx->ccid3hctx_x <<= 6; 324 hc->tx_x <<= 6;
339 } 325 }
340 ccid3_update_send_interval(hctx); 326 ccid3_update_send_interval(hc);
341 327
342 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK); 328 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
343 break; 329 break;
344 case TFRC_SSTATE_NO_FBACK: 330 case TFRC_SSTATE_NO_FBACK:
345 case TFRC_SSTATE_FBACK: 331 case TFRC_SSTATE_FBACK:
346 delay = ktime_us_delta(hctx->ccid3hctx_t_nom, now); 332 delay = ktime_us_delta(hc->tx_t_nom, now);
347 ccid3_pr_debug("delay=%ld\n", (long)delay); 333 ccid3_pr_debug("delay=%ld\n", (long)delay);
348 /* 334 /*
349 * Scheduling of packet transmissions [RFC 3448, 4.6] 335 * Scheduling of packet transmissions [RFC 3448, 4.6]
@@ -353,10 +339,10 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
353 * else 339 * else
354 * // send the packet in (t_nom - t_now) milliseconds. 340 * // send the packet in (t_nom - t_now) milliseconds.
355 */ 341 */
356 if (delay - (s64)hctx->ccid3hctx_delta >= 1000) 342 if (delay - (s64)hc->tx_delta >= 1000)
357 return (u32)delay / 1000L; 343 return (u32)delay / 1000L;
358 344
359 ccid3_hc_tx_update_win_count(hctx, now); 345 ccid3_hc_tx_update_win_count(hc, now);
360 break; 346 break;
361 case TFRC_SSTATE_TERM: 347 case TFRC_SSTATE_TERM:
362 DCCP_BUG("%s(%p) - Illegal state TERM", dccp_role(sk), sk); 348 DCCP_BUG("%s(%p) - Illegal state TERM", dccp_role(sk), sk);
@@ -365,28 +351,27 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
365 351
366 /* prepare to send now (add options etc.) */ 352 /* prepare to send now (add options etc.) */
367 dp->dccps_hc_tx_insert_options = 1; 353 dp->dccps_hc_tx_insert_options = 1;
368 DCCP_SKB_CB(skb)->dccpd_ccval = hctx->ccid3hctx_last_win_count; 354 DCCP_SKB_CB(skb)->dccpd_ccval = hc->tx_last_win_count;
369 355
370 /* set the nominal send time for the next following packet */ 356 /* set the nominal send time for the next following packet */
371 hctx->ccid3hctx_t_nom = ktime_add_us(hctx->ccid3hctx_t_nom, 357 hc->tx_t_nom = ktime_add_us(hc->tx_t_nom, hc->tx_t_ipi);
372 hctx->ccid3hctx_t_ipi);
373 return 0; 358 return 0;
374} 359}
375 360
376static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, 361static void ccid3_hc_tx_packet_sent(struct sock *sk, int more,
377 unsigned int len) 362 unsigned int len)
378{ 363{
379 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); 364 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
380 365
381 ccid3_hc_tx_update_s(hctx, len); 366 ccid3_hc_tx_update_s(hc, len);
382 367
383 if (tfrc_tx_hist_add(&hctx->ccid3hctx_hist, dccp_sk(sk)->dccps_gss)) 368 if (tfrc_tx_hist_add(&hc->tx_hist, dccp_sk(sk)->dccps_gss))
384 DCCP_CRIT("packet history - out of memory!"); 369 DCCP_CRIT("packet history - out of memory!");
385} 370}
386 371
387static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) 372static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
388{ 373{
389 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); 374 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
390 struct ccid3_options_received *opt_recv; 375 struct ccid3_options_received *opt_recv;
391 ktime_t now; 376 ktime_t now;
392 unsigned long t_nfb; 377 unsigned long t_nfb;
@@ -397,15 +382,15 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
397 DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_DATAACK)) 382 DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_DATAACK))
398 return; 383 return;
399 /* ... and only in the established state */ 384 /* ... and only in the established state */
400 if (hctx->ccid3hctx_state != TFRC_SSTATE_FBACK && 385 if (hc->tx_state != TFRC_SSTATE_FBACK &&
401 hctx->ccid3hctx_state != TFRC_SSTATE_NO_FBACK) 386 hc->tx_state != TFRC_SSTATE_NO_FBACK)
402 return; 387 return;
403 388
404 opt_recv = &hctx->ccid3hctx_options_received; 389 opt_recv = &hc->tx_options_received;
405 now = ktime_get_real(); 390 now = ktime_get_real();
406 391
407 /* Estimate RTT from history if ACK number is valid */ 392 /* Estimate RTT from history if ACK number is valid */
408 r_sample = tfrc_tx_hist_rtt(hctx->ccid3hctx_hist, 393 r_sample = tfrc_tx_hist_rtt(hc->tx_hist,
409 DCCP_SKB_CB(skb)->dccpd_ack_seq, now); 394 DCCP_SKB_CB(skb)->dccpd_ack_seq, now);
410 if (r_sample == 0) { 395 if (r_sample == 0) {
411 DCCP_WARN("%s(%p): %s with bogus ACK-%llu\n", dccp_role(sk), sk, 396 DCCP_WARN("%s(%p): %s with bogus ACK-%llu\n", dccp_role(sk), sk,
@@ -415,37 +400,37 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
415 } 400 }
416 401
417 /* Update receive rate in units of 64 * bytes/second */ 402 /* Update receive rate in units of 64 * bytes/second */
418 hctx->ccid3hctx_x_recv = opt_recv->ccid3or_receive_rate; 403 hc->tx_x_recv = opt_recv->ccid3or_receive_rate;
419 hctx->ccid3hctx_x_recv <<= 6; 404 hc->tx_x_recv <<= 6;
420 405
421 /* Update loss event rate (which is scaled by 1e6) */ 406 /* Update loss event rate (which is scaled by 1e6) */
422 pinv = opt_recv->ccid3or_loss_event_rate; 407 pinv = opt_recv->ccid3or_loss_event_rate;
423 if (pinv == ~0U || pinv == 0) /* see RFC 4342, 8.5 */ 408 if (pinv == ~0U || pinv == 0) /* see RFC 4342, 8.5 */
424 hctx->ccid3hctx_p = 0; 409 hc->tx_p = 0;
425 else /* can not exceed 100% */ 410 else /* can not exceed 100% */
426 hctx->ccid3hctx_p = scaled_div(1, pinv); 411 hc->tx_p = scaled_div(1, pinv);
427 /* 412 /*
428 * Validate new RTT sample and update moving average 413 * Validate new RTT sample and update moving average
429 */ 414 */
430 r_sample = dccp_sample_rtt(sk, r_sample); 415 r_sample = dccp_sample_rtt(sk, r_sample);
431 hctx->ccid3hctx_rtt = tfrc_ewma(hctx->ccid3hctx_rtt, r_sample, 9); 416 hc->tx_rtt = tfrc_ewma(hc->tx_rtt, r_sample, 9);
432 /* 417 /*
433 * Update allowed sending rate X as per draft rfc3448bis-00, 4.2/3 418 * Update allowed sending rate X as per draft rfc3448bis-00, 4.2/3
434 */ 419 */
435 if (hctx->ccid3hctx_state == TFRC_SSTATE_NO_FBACK) { 420 if (hc->tx_state == TFRC_SSTATE_NO_FBACK) {
436 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK); 421 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK);
437 422
438 if (hctx->ccid3hctx_t_rto == 0) { 423 if (hc->tx_t_rto == 0) {
439 /* 424 /*
440 * Initial feedback packet: Larger Initial Windows (4.2) 425 * Initial feedback packet: Larger Initial Windows (4.2)
441 */ 426 */
442 hctx->ccid3hctx_x = rfc3390_initial_rate(sk); 427 hc->tx_x = rfc3390_initial_rate(sk);
443 hctx->ccid3hctx_t_ld = now; 428 hc->tx_t_ld = now;
444 429
445 ccid3_update_send_interval(hctx); 430 ccid3_update_send_interval(hc);
446 431
447 goto done_computing_x; 432 goto done_computing_x;
448 } else if (hctx->ccid3hctx_p == 0) { 433 } else if (hc->tx_p == 0) {
449 /* 434 /*
450 * First feedback after nofeedback timer expiry (4.3) 435 * First feedback after nofeedback timer expiry (4.3)
451 */ 436 */
@@ -454,25 +439,20 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
454 } 439 }
455 440
456 /* Update sending rate (step 4 of [RFC 3448, 4.3]) */ 441 /* Update sending rate (step 4 of [RFC 3448, 4.3]) */
457 if (hctx->ccid3hctx_p > 0) 442 if (hc->tx_p > 0)
458 hctx->ccid3hctx_x_calc = 443 hc->tx_x_calc = tfrc_calc_x(hc->tx_s, hc->tx_rtt, hc->tx_p);
459 tfrc_calc_x(hctx->ccid3hctx_s,
460 hctx->ccid3hctx_rtt,
461 hctx->ccid3hctx_p);
462 ccid3_hc_tx_update_x(sk, &now); 444 ccid3_hc_tx_update_x(sk, &now);
463 445
464done_computing_x: 446done_computing_x:
465 ccid3_pr_debug("%s(%p), RTT=%uus (sample=%uus), s=%u, " 447 ccid3_pr_debug("%s(%p), RTT=%uus (sample=%uus), s=%u, "
466 "p=%u, X_calc=%u, X_recv=%u, X=%u\n", 448 "p=%u, X_calc=%u, X_recv=%u, X=%u\n",
467 dccp_role(sk), 449 dccp_role(sk), sk, hc->tx_rtt, r_sample,
468 sk, hctx->ccid3hctx_rtt, r_sample, 450 hc->tx_s, hc->tx_p, hc->tx_x_calc,
469 hctx->ccid3hctx_s, hctx->ccid3hctx_p, 451 (unsigned)(hc->tx_x_recv >> 6),
470 hctx->ccid3hctx_x_calc, 452 (unsigned)(hc->tx_x >> 6));
471 (unsigned)(hctx->ccid3hctx_x_recv >> 6),
472 (unsigned)(hctx->ccid3hctx_x >> 6));
473 453
474 /* unschedule no feedback timer */ 454 /* unschedule no feedback timer */
475 sk_stop_timer(sk, &hctx->ccid3hctx_no_feedback_timer); 455 sk_stop_timer(sk, &hc->tx_no_feedback_timer);
476 456
477 /* 457 /*
478 * As we have calculated new ipi, delta, t_nom it is possible 458 * As we have calculated new ipi, delta, t_nom it is possible
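
[Editor's note: hc->tx_x_calc = tfrc_calc_x(s, rtt, p) in the hunk above evaluates the TCP-friendly rate equation of RFC 3448, 3.1 in fixed point via lookup tables. The following is a floating-point sketch of the same equation for orientation, with t_RTO taken as 4*R as the RFC suggests; it is not the kernel implementation.]

#include <math.h>
#include <stdio.h>

/* s: packet size in bytes, R: RTT in seconds, p: loss event rate (0..1). */
static double tfrc_x_calc(double s, double R, double p)
{
	double t_rto = 4.0 * R;
	double f = R * sqrt(2.0 * p / 3.0) +
		   t_rto * 3.0 * sqrt(3.0 * p / 8.0) * p * (1.0 + 32.0 * p * p);

	return s / f;			/* allowed sending rate in bytes/second */
}

int main(void)
{
	double s = 1460.0, R = 0.1;	/* 1460-byte packets, 100 ms RTT */

	for (double p = 0.001; p <= 0.1; p *= 10.0)
		printf("p=%.3f  X_calc=%.0f byte/s\n", p, tfrc_x_calc(s, R, p));
	return 0;
}

(Compile with -lm.) As p grows by a factor of ten the allowed rate drops sharply, which is what keeps a CCID3 flow roughly fair against TCP under the same loss conditions.
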
@@ -486,21 +466,19 @@ done_computing_x:
486 * This can help avoid triggering the nofeedback timer too 466 * This can help avoid triggering the nofeedback timer too
487 * often ('spinning') on LANs with small RTTs. 467 * often ('spinning') on LANs with small RTTs.
488 */ 468 */
489 hctx->ccid3hctx_t_rto = max_t(u32, 4 * hctx->ccid3hctx_rtt, 469 hc->tx_t_rto = max_t(u32, 4 * hc->tx_rtt, (CONFIG_IP_DCCP_CCID3_RTO *
490 (CONFIG_IP_DCCP_CCID3_RTO * 470 (USEC_PER_SEC / 1000)));
491 (USEC_PER_SEC / 1000)));
492 /* 471 /*
493 * Schedule no feedback timer to expire in 472 * Schedule no feedback timer to expire in
494 * max(t_RTO, 2 * s/X) = max(t_RTO, 2 * t_ipi) 473 * max(t_RTO, 2 * s/X) = max(t_RTO, 2 * t_ipi)
495 */ 474 */
496 t_nfb = max(hctx->ccid3hctx_t_rto, 2 * hctx->ccid3hctx_t_ipi); 475 t_nfb = max(hc->tx_t_rto, 2 * hc->tx_t_ipi);
497 476
498 ccid3_pr_debug("%s(%p), Scheduled no feedback timer to " 477 ccid3_pr_debug("%s(%p), Scheduled no feedback timer to "
499 "expire in %lu jiffies (%luus)\n", 478 "expire in %lu jiffies (%luus)\n",
500 dccp_role(sk), 479 dccp_role(sk), sk, usecs_to_jiffies(t_nfb), t_nfb);
501 sk, usecs_to_jiffies(t_nfb), t_nfb);
502 480
503 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, 481 sk_reset_timer(sk, &hc->tx_no_feedback_timer,
504 jiffies + usecs_to_jiffies(t_nfb)); 482 jiffies + usecs_to_jiffies(t_nfb));
505} 483}
506 484
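
[Editor's note: the hunk above arms the nofeedback timer as t_nfb = max(t_RTO, 2*t_ipi), with t_RTO = max(4*RTT, configured floor). The arithmetic-only sketch below uses 100 ms as a stand-in for CONFIG_IP_DCCP_CCID3_RTO; the real build-time value may differ.]

#include <stdint.h>
#include <stdio.h>

#define RTO_CONFIG_MS	100U		/* assumed build-time lower bound, in ms */

static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }

int main(void)
{
	uint32_t rtt = 40000;		/* 40 ms smoothed RTT, in usec */
	uint32_t t_ipi = 2500;		/* inter-packet interval, in usec */

	uint32_t t_rto = max_u32(4 * rtt, RTO_CONFIG_MS * 1000);
	uint32_t t_nfb = max_u32(t_rto, 2 * t_ipi);

	printf("t_RTO=%uus, nofeedback timer=%uus\n", t_rto, t_nfb);
	return 0;
}
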
@@ -510,11 +488,11 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
510{ 488{
511 int rc = 0; 489 int rc = 0;
512 const struct dccp_sock *dp = dccp_sk(sk); 490 const struct dccp_sock *dp = dccp_sk(sk);
513 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); 491 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
514 struct ccid3_options_received *opt_recv; 492 struct ccid3_options_received *opt_recv;
515 __be32 opt_val; 493 __be32 opt_val;
516 494
517 opt_recv = &hctx->ccid3hctx_options_received; 495 opt_recv = &hc->tx_options_received;
518 496
519 if (opt_recv->ccid3or_seqno != dp->dccps_gsr) { 497 if (opt_recv->ccid3or_seqno != dp->dccps_gsr) {
520 opt_recv->ccid3or_seqno = dp->dccps_gsr; 498 opt_recv->ccid3or_seqno = dp->dccps_gsr;
@@ -568,56 +546,55 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
568 546
569static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk) 547static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk)
570{ 548{
571 struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid); 549 struct ccid3_hc_tx_sock *hc = ccid_priv(ccid);
572 550
573 hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT; 551 hc->tx_state = TFRC_SSTATE_NO_SENT;
574 hctx->ccid3hctx_hist = NULL; 552 hc->tx_hist = NULL;
575 setup_timer(&hctx->ccid3hctx_no_feedback_timer, 553 setup_timer(&hc->tx_no_feedback_timer,
576 ccid3_hc_tx_no_feedback_timer, (unsigned long)sk); 554 ccid3_hc_tx_no_feedback_timer, (unsigned long)sk);
577
578 return 0; 555 return 0;
579} 556}
580 557
581static void ccid3_hc_tx_exit(struct sock *sk) 558static void ccid3_hc_tx_exit(struct sock *sk)
582{ 559{
583 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); 560 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
584 561
585 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_TERM); 562 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_TERM);
586 sk_stop_timer(sk, &hctx->ccid3hctx_no_feedback_timer); 563 sk_stop_timer(sk, &hc->tx_no_feedback_timer);
587 564
588 tfrc_tx_hist_purge(&hctx->ccid3hctx_hist); 565 tfrc_tx_hist_purge(&hc->tx_hist);
589} 566}
590 567
591static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info) 568static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info)
592{ 569{
593 struct ccid3_hc_tx_sock *hctx; 570 struct ccid3_hc_tx_sock *hc;
594 571
595 /* Listen socks doesn't have a private CCID block */ 572 /* Listen socks doesn't have a private CCID block */
596 if (sk->sk_state == DCCP_LISTEN) 573 if (sk->sk_state == DCCP_LISTEN)
597 return; 574 return;
598 575
599 hctx = ccid3_hc_tx_sk(sk); 576 hc = ccid3_hc_tx_sk(sk);
600 info->tcpi_rto = hctx->ccid3hctx_t_rto; 577 info->tcpi_rto = hc->tx_t_rto;
601 info->tcpi_rtt = hctx->ccid3hctx_rtt; 578 info->tcpi_rtt = hc->tx_rtt;
602} 579}
603 580
604static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len, 581static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
605 u32 __user *optval, int __user *optlen) 582 u32 __user *optval, int __user *optlen)
606{ 583{
607 const struct ccid3_hc_tx_sock *hctx; 584 const struct ccid3_hc_tx_sock *hc;
608 const void *val; 585 const void *val;
609 586
610 /* Listen socks doesn't have a private CCID block */ 587 /* Listen socks doesn't have a private CCID block */
611 if (sk->sk_state == DCCP_LISTEN) 588 if (sk->sk_state == DCCP_LISTEN)
612 return -EINVAL; 589 return -EINVAL;
613 590
614 hctx = ccid3_hc_tx_sk(sk); 591 hc = ccid3_hc_tx_sk(sk);
615 switch (optname) { 592 switch (optname) {
616 case DCCP_SOCKOPT_CCID_TX_INFO: 593 case DCCP_SOCKOPT_CCID_TX_INFO:
617 if (len < sizeof(hctx->ccid3hctx_tfrc)) 594 if (len < sizeof(hc->tx_tfrc))
618 return -EINVAL; 595 return -EINVAL;
619 len = sizeof(hctx->ccid3hctx_tfrc); 596 len = sizeof(hc->tx_tfrc);
620 val = &hctx->ccid3hctx_tfrc; 597 val = &hc->tx_tfrc;
621 break; 598 break;
622 default: 599 default:
623 return -ENOPROTOOPT; 600 return -ENOPROTOOPT;
@@ -657,34 +634,34 @@ static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state)
657static void ccid3_hc_rx_set_state(struct sock *sk, 634static void ccid3_hc_rx_set_state(struct sock *sk,
658 enum ccid3_hc_rx_states state) 635 enum ccid3_hc_rx_states state)
659{ 636{
660 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); 637 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
661 enum ccid3_hc_rx_states oldstate = hcrx->ccid3hcrx_state; 638 enum ccid3_hc_rx_states oldstate = hc->rx_state;
662 639
663 ccid3_pr_debug("%s(%p) %-8.8s -> %s\n", 640 ccid3_pr_debug("%s(%p) %-8.8s -> %s\n",
664 dccp_role(sk), sk, ccid3_rx_state_name(oldstate), 641 dccp_role(sk), sk, ccid3_rx_state_name(oldstate),
665 ccid3_rx_state_name(state)); 642 ccid3_rx_state_name(state));
666 WARN_ON(state == oldstate); 643 WARN_ON(state == oldstate);
667 hcrx->ccid3hcrx_state = state; 644 hc->rx_state = state;
668} 645}
669 646
670static void ccid3_hc_rx_send_feedback(struct sock *sk, 647static void ccid3_hc_rx_send_feedback(struct sock *sk,
671 const struct sk_buff *skb, 648 const struct sk_buff *skb,
672 enum ccid3_fback_type fbtype) 649 enum ccid3_fback_type fbtype)
673{ 650{
674 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); 651 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
675 struct dccp_sock *dp = dccp_sk(sk); 652 struct dccp_sock *dp = dccp_sk(sk);
676 ktime_t now; 653 ktime_t now;
677 s64 delta = 0; 654 s64 delta = 0;
678 655
679 if (unlikely(hcrx->ccid3hcrx_state == TFRC_RSTATE_TERM)) 656 if (unlikely(hc->rx_state == TFRC_RSTATE_TERM))
680 return; 657 return;
681 658
682 now = ktime_get_real(); 659 now = ktime_get_real();
683 660
684 switch (fbtype) { 661 switch (fbtype) {
685 case CCID3_FBACK_INITIAL: 662 case CCID3_FBACK_INITIAL:
686 hcrx->ccid3hcrx_x_recv = 0; 663 hc->rx_x_recv = 0;
687 hcrx->ccid3hcrx_pinv = ~0U; /* see RFC 4342, 8.5 */ 664 hc->rx_pinv = ~0U; /* see RFC 4342, 8.5 */
688 break; 665 break;
689 case CCID3_FBACK_PARAM_CHANGE: 666 case CCID3_FBACK_PARAM_CHANGE:
690 /* 667 /*
@@ -697,27 +674,26 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
697 * the number of bytes since last feedback. 674 * the number of bytes since last feedback.
698 * This is a safe fallback, since X is bounded above by X_calc. 675 * This is a safe fallback, since X is bounded above by X_calc.
699 */ 676 */
700 if (hcrx->ccid3hcrx_x_recv > 0) 677 if (hc->rx_x_recv > 0)
701 break; 678 break;
702 /* fall through */ 679 /* fall through */
703 case CCID3_FBACK_PERIODIC: 680 case CCID3_FBACK_PERIODIC:
704 delta = ktime_us_delta(now, hcrx->ccid3hcrx_tstamp_last_feedback); 681 delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback);
705 if (delta <= 0) 682 if (delta <= 0)
706 DCCP_BUG("delta (%ld) <= 0", (long)delta); 683 DCCP_BUG("delta (%ld) <= 0", (long)delta);
707 else 684 else
708 hcrx->ccid3hcrx_x_recv = 685 hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
709 scaled_div32(hcrx->ccid3hcrx_bytes_recv, delta);
710 break; 686 break;
711 default: 687 default:
712 return; 688 return;
713 } 689 }
714 690
715 ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta, 691 ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta,
716 hcrx->ccid3hcrx_x_recv, hcrx->ccid3hcrx_pinv); 692 hc->rx_x_recv, hc->rx_pinv);
717 693
718 hcrx->ccid3hcrx_tstamp_last_feedback = now; 694 hc->rx_tstamp_last_feedback = now;
719 hcrx->ccid3hcrx_last_counter = dccp_hdr(skb)->dccph_ccval; 695 hc->rx_last_counter = dccp_hdr(skb)->dccph_ccval;
720 hcrx->ccid3hcrx_bytes_recv = 0; 696 hc->rx_bytes_recv = 0;
721 697
722 dp->dccps_hc_rx_insert_options = 1; 698 dp->dccps_hc_rx_insert_options = 1;
723 dccp_send_ack(sk); 699 dccp_send_ack(sk);
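
[Editor's note: in the feedback path above, hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta) turns the payload bytes seen since the last feedback into a rate. The sketch below assumes scaled_div32(a, b) computes (a * 1e6) / b, so bytes over a microsecond interval yield bytes per second; the sender later shifts this by 6 into its 64*bytes/s units. Illustrative names only.]

#include <stdint.h>
#include <stdio.h>

static uint32_t rx_rate_bytes_per_sec(uint64_t bytes_recv, int64_t delta_us)
{
	if (delta_us <= 0)		/* the kernel logs a bug in this case */
		return 0;
	return (uint32_t)((bytes_recv * 1000000ULL) / (uint64_t)delta_us);
}

int main(void)
{
	uint64_t bytes = 73000;		/* payload bytes since last feedback */
	int64_t delta = 250000;		/* 250 ms between feedback packets */
	uint32_t x_recv = rx_rate_bytes_per_sec(bytes, delta);

	printf("X_recv=%u byte/s (%u in 64*byte/s units)\n",
	       x_recv, x_recv << 6);
	return 0;
}
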
@@ -725,19 +701,19 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
725 701
726static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb) 702static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
727{ 703{
728 const struct ccid3_hc_rx_sock *hcrx; 704 const struct ccid3_hc_rx_sock *hc;
729 __be32 x_recv, pinv; 705 __be32 x_recv, pinv;
730 706
731 if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)) 707 if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN))
732 return 0; 708 return 0;
733 709
734 hcrx = ccid3_hc_rx_sk(sk); 710 hc = ccid3_hc_rx_sk(sk);
735 711
736 if (dccp_packet_without_ack(skb)) 712 if (dccp_packet_without_ack(skb))
737 return 0; 713 return 0;
738 714
739 x_recv = htonl(hcrx->ccid3hcrx_x_recv); 715 x_recv = htonl(hc->rx_x_recv);
740 pinv = htonl(hcrx->ccid3hcrx_pinv); 716 pinv = htonl(hc->rx_pinv);
741 717
742 if (dccp_insert_option(sk, skb, TFRC_OPT_LOSS_EVENT_RATE, 718 if (dccp_insert_option(sk, skb, TFRC_OPT_LOSS_EVENT_RATE,
743 &pinv, sizeof(pinv)) || 719 &pinv, sizeof(pinv)) ||
@@ -760,26 +736,26 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
760 */ 736 */
761static u32 ccid3_first_li(struct sock *sk) 737static u32 ccid3_first_li(struct sock *sk)
762{ 738{
763 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); 739 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
764 u32 x_recv, p, delta; 740 u32 x_recv, p, delta;
765 u64 fval; 741 u64 fval;
766 742
767 if (hcrx->ccid3hcrx_rtt == 0) { 743 if (hc->rx_rtt == 0) {
768 DCCP_WARN("No RTT estimate available, using fallback RTT\n"); 744 DCCP_WARN("No RTT estimate available, using fallback RTT\n");
769 hcrx->ccid3hcrx_rtt = DCCP_FALLBACK_RTT; 745 hc->rx_rtt = DCCP_FALLBACK_RTT;
770 } 746 }
771 747
772 delta = ktime_to_us(net_timedelta(hcrx->ccid3hcrx_tstamp_last_feedback)); 748 delta = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback));
773 x_recv = scaled_div32(hcrx->ccid3hcrx_bytes_recv, delta); 749 x_recv = scaled_div32(hc->rx_bytes_recv, delta);
774 if (x_recv == 0) { /* would also trigger divide-by-zero */ 750 if (x_recv == 0) { /* would also trigger divide-by-zero */
775 DCCP_WARN("X_recv==0\n"); 751 DCCP_WARN("X_recv==0\n");
776 if ((x_recv = hcrx->ccid3hcrx_x_recv) == 0) { 752 if ((x_recv = hc->rx_x_recv) == 0) {
777 DCCP_BUG("stored value of X_recv is zero"); 753 DCCP_BUG("stored value of X_recv is zero");
778 return ~0U; 754 return ~0U;
779 } 755 }
780 } 756 }
781 757
782 fval = scaled_div(hcrx->ccid3hcrx_s, hcrx->ccid3hcrx_rtt); 758 fval = scaled_div(hc->rx_s, hc->rx_rtt);
783 fval = scaled_div32(fval, x_recv); 759 fval = scaled_div32(fval, x_recv);
784 p = tfrc_calc_x_reverse_lookup(fval); 760 p = tfrc_calc_x_reverse_lookup(fval);
785 761
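
[Editor's note: ccid3_first_li() above derives the first loss interval by inverting the rate equation: given the measured X_recv, find the p for which the equation would produce that rate, then use 1/p. tfrc_calc_x_reverse_lookup() does this with fixed-point tables; the sketch below does the same thing by bisection in floating point, under the same RFC 3448 equation and with t_RTO = 4*R assumed. Not the kernel algorithm.]

#include <math.h>
#include <stdio.h>

static double tfrc_x(double s, double R, double p)
{
	double t_rto = 4.0 * R;

	return s / (R * sqrt(2.0 * p / 3.0) +
		    t_rto * 3.0 * sqrt(3.0 * p / 8.0) * p * (1.0 + 32.0 * p * p));
}

/* Find p in (0, 1] with tfrc_x(s, R, p) == x_recv; X is decreasing in p. */
static double tfrc_p_from_rate(double s, double R, double x_recv)
{
	double lo = 1e-9, hi = 1.0;

	for (int i = 0; i < 60; i++) {
		double mid = (lo + hi) / 2.0;

		if (tfrc_x(s, R, mid) > x_recv)
			lo = mid;	/* rate still too high: more loss needed */
		else
			hi = mid;
	}
	return hi;
}

int main(void)
{
	double s = 1460.0, R = 0.1, x_recv = 100000.0;	/* 100 kB/s measured */
	double p = tfrc_p_from_rate(s, R, x_recv);

	printf("p=%.6f, first loss interval ~ %.0f packets\n", p, 1.0 / p);
	return 0;
}
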
@@ -791,19 +767,19 @@ static u32 ccid3_first_li(struct sock *sk)
791 767
792static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) 768static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
793{ 769{
794 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); 770 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
795 enum ccid3_fback_type do_feedback = CCID3_FBACK_NONE; 771 enum ccid3_fback_type do_feedback = CCID3_FBACK_NONE;
796 const u64 ndp = dccp_sk(sk)->dccps_options_received.dccpor_ndp; 772 const u64 ndp = dccp_sk(sk)->dccps_options_received.dccpor_ndp;
797 const bool is_data_packet = dccp_data_packet(skb); 773 const bool is_data_packet = dccp_data_packet(skb);
798 774
799 if (unlikely(hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA)) { 775 if (unlikely(hc->rx_state == TFRC_RSTATE_NO_DATA)) {
800 if (is_data_packet) { 776 if (is_data_packet) {
801 const u32 payload = skb->len - dccp_hdr(skb)->dccph_doff * 4; 777 const u32 payload = skb->len - dccp_hdr(skb)->dccph_doff * 4;
802 do_feedback = CCID3_FBACK_INITIAL; 778 do_feedback = CCID3_FBACK_INITIAL;
803 ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA); 779 ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA);
804 hcrx->ccid3hcrx_s = payload; 780 hc->rx_s = payload;
805 /* 781 /*
806 * Not necessary to update ccid3hcrx_bytes_recv here, 782 * Not necessary to update rx_bytes_recv here,
807 * since X_recv = 0 for the first feedback packet (cf. 783 * since X_recv = 0 for the first feedback packet (cf.
808 * RFC 3448, 6.3) -- gerrit 784 * RFC 3448, 6.3) -- gerrit
809 */ 785 */
@@ -811,7 +787,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
811 goto update_records; 787 goto update_records;
812 } 788 }
813 789
814 if (tfrc_rx_hist_duplicate(&hcrx->ccid3hcrx_hist, skb)) 790 if (tfrc_rx_hist_duplicate(&hc->rx_hist, skb))
815 return; /* done receiving */ 791 return; /* done receiving */
816 792
817 if (is_data_packet) { 793 if (is_data_packet) {
@@ -819,20 +795,20 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
819 /* 795 /*
820 * Update moving-average of s and the sum of received payload bytes 796 * Update moving-average of s and the sum of received payload bytes
821 */ 797 */
822 hcrx->ccid3hcrx_s = tfrc_ewma(hcrx->ccid3hcrx_s, payload, 9); 798 hc->rx_s = tfrc_ewma(hc->rx_s, payload, 9);
823 hcrx->ccid3hcrx_bytes_recv += payload; 799 hc->rx_bytes_recv += payload;
824 } 800 }
825 801
826 /* 802 /*
827 * Perform loss detection and handle pending losses 803 * Perform loss detection and handle pending losses
828 */ 804 */
829 if (tfrc_rx_handle_loss(&hcrx->ccid3hcrx_hist, &hcrx->ccid3hcrx_li_hist, 805 if (tfrc_rx_handle_loss(&hc->rx_hist, &hc->rx_li_hist,
830 skb, ndp, ccid3_first_li, sk)) { 806 skb, ndp, ccid3_first_li, sk)) {
831 do_feedback = CCID3_FBACK_PARAM_CHANGE; 807 do_feedback = CCID3_FBACK_PARAM_CHANGE;
832 goto done_receiving; 808 goto done_receiving;
833 } 809 }
834 810
835 if (tfrc_rx_hist_loss_pending(&hcrx->ccid3hcrx_hist)) 811 if (tfrc_rx_hist_loss_pending(&hc->rx_hist))
836 return; /* done receiving */ 812 return; /* done receiving */
837 813
838 /* 814 /*
@@ -841,17 +817,17 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
841 if (unlikely(!is_data_packet)) 817 if (unlikely(!is_data_packet))
842 goto update_records; 818 goto update_records;
843 819
844 if (!tfrc_lh_is_initialised(&hcrx->ccid3hcrx_li_hist)) { 820 if (!tfrc_lh_is_initialised(&hc->rx_li_hist)) {
845 const u32 sample = tfrc_rx_hist_sample_rtt(&hcrx->ccid3hcrx_hist, skb); 821 const u32 sample = tfrc_rx_hist_sample_rtt(&hc->rx_hist, skb);
846 /* 822 /*
847 * Empty loss history: no loss so far, hence p stays 0. 823 * Empty loss history: no loss so far, hence p stays 0.
848 * Sample RTT values, since an RTT estimate is required for the 824 * Sample RTT values, since an RTT estimate is required for the
849 * computation of p when the first loss occurs; RFC 3448, 6.3.1. 825 * computation of p when the first loss occurs; RFC 3448, 6.3.1.
850 */ 826 */
851 if (sample != 0) 827 if (sample != 0)
852 hcrx->ccid3hcrx_rtt = tfrc_ewma(hcrx->ccid3hcrx_rtt, sample, 9); 828 hc->rx_rtt = tfrc_ewma(hc->rx_rtt, sample, 9);
853 829
854 } else if (tfrc_lh_update_i_mean(&hcrx->ccid3hcrx_li_hist, skb)) { 830 } else if (tfrc_lh_update_i_mean(&hc->rx_li_hist, skb)) {
855 /* 831 /*
856 * Step (3) of [RFC 3448, 6.1]: Recompute I_mean and, if I_mean 832 * Step (3) of [RFC 3448, 6.1]: Recompute I_mean and, if I_mean
857 * has decreased (resp. p has increased), send feedback now. 833 * has decreased (resp. p has increased), send feedback now.
@@ -862,11 +838,11 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
862 /* 838 /*
863 * Check if the periodic once-per-RTT feedback is due; RFC 4342, 10.3 839 * Check if the periodic once-per-RTT feedback is due; RFC 4342, 10.3
864 */ 840 */
865 if (SUB16(dccp_hdr(skb)->dccph_ccval, hcrx->ccid3hcrx_last_counter) > 3) 841 if (SUB16(dccp_hdr(skb)->dccph_ccval, hc->rx_last_counter) > 3)
866 do_feedback = CCID3_FBACK_PERIODIC; 842 do_feedback = CCID3_FBACK_PERIODIC;
867 843
868update_records: 844update_records:
869 tfrc_rx_hist_add_packet(&hcrx->ccid3hcrx_hist, skb, ndp); 845 tfrc_rx_hist_add_packet(&hc->rx_hist, skb, ndp);
870 846
871done_receiving: 847done_receiving:
872 if (do_feedback) 848 if (do_feedback)
@@ -875,41 +851,41 @@ done_receiving:
875 851
876static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk) 852static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk)
877{ 853{
878 struct ccid3_hc_rx_sock *hcrx = ccid_priv(ccid); 854 struct ccid3_hc_rx_sock *hc = ccid_priv(ccid);
879 855
880 hcrx->ccid3hcrx_state = TFRC_RSTATE_NO_DATA; 856 hc->rx_state = TFRC_RSTATE_NO_DATA;
881 tfrc_lh_init(&hcrx->ccid3hcrx_li_hist); 857 tfrc_lh_init(&hc->rx_li_hist);
882 return tfrc_rx_hist_alloc(&hcrx->ccid3hcrx_hist); 858 return tfrc_rx_hist_alloc(&hc->rx_hist);
883} 859}
884 860
885static void ccid3_hc_rx_exit(struct sock *sk) 861static void ccid3_hc_rx_exit(struct sock *sk)
886{ 862{
887 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); 863 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
888 864
889 ccid3_hc_rx_set_state(sk, TFRC_RSTATE_TERM); 865 ccid3_hc_rx_set_state(sk, TFRC_RSTATE_TERM);
890 866
891 tfrc_rx_hist_purge(&hcrx->ccid3hcrx_hist); 867 tfrc_rx_hist_purge(&hc->rx_hist);
892 tfrc_lh_cleanup(&hcrx->ccid3hcrx_li_hist); 868 tfrc_lh_cleanup(&hc->rx_li_hist);
893} 869}
894 870
895static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info) 871static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info)
896{ 872{
897 const struct ccid3_hc_rx_sock *hcrx; 873 const struct ccid3_hc_rx_sock *hc;
898 874
899 /* Listen socks doesn't have a private CCID block */ 875 /* Listen socks doesn't have a private CCID block */
900 if (sk->sk_state == DCCP_LISTEN) 876 if (sk->sk_state == DCCP_LISTEN)
901 return; 877 return;
902 878
903 hcrx = ccid3_hc_rx_sk(sk); 879 hc = ccid3_hc_rx_sk(sk);
904 info->tcpi_ca_state = hcrx->ccid3hcrx_state; 880 info->tcpi_ca_state = hc->rx_state;
905 info->tcpi_options |= TCPI_OPT_TIMESTAMPS; 881 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
906 info->tcpi_rcv_rtt = hcrx->ccid3hcrx_rtt; 882 info->tcpi_rcv_rtt = hc->rx_rtt;
907} 883}
908 884
909static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len, 885static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len,
910 u32 __user *optval, int __user *optlen) 886 u32 __user *optval, int __user *optlen)
911{ 887{
912 const struct ccid3_hc_rx_sock *hcrx; 888 const struct ccid3_hc_rx_sock *hc;
913 struct tfrc_rx_info rx_info; 889 struct tfrc_rx_info rx_info;
914 const void *val; 890 const void *val;
915 891
@@ -917,15 +893,15 @@ static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len,
917 if (sk->sk_state == DCCP_LISTEN) 893 if (sk->sk_state == DCCP_LISTEN)
918 return -EINVAL; 894 return -EINVAL;
919 895
920 hcrx = ccid3_hc_rx_sk(sk); 896 hc = ccid3_hc_rx_sk(sk);
921 switch (optname) { 897 switch (optname) {
922 case DCCP_SOCKOPT_CCID_RX_INFO: 898 case DCCP_SOCKOPT_CCID_RX_INFO:
923 if (len < sizeof(rx_info)) 899 if (len < sizeof(rx_info))
924 return -EINVAL; 900 return -EINVAL;
925 rx_info.tfrcrx_x_recv = hcrx->ccid3hcrx_x_recv; 901 rx_info.tfrcrx_x_recv = hc->rx_x_recv;
926 rx_info.tfrcrx_rtt = hcrx->ccid3hcrx_rtt; 902 rx_info.tfrcrx_rtt = hc->rx_rtt;
927 rx_info.tfrcrx_p = hcrx->ccid3hcrx_pinv == 0 ? ~0U : 903 rx_info.tfrcrx_p = hc->rx_pinv == 0 ? ~0U :
928 scaled_div(1, hcrx->ccid3hcrx_pinv); 904 scaled_div(1, hc->rx_pinv);
929 len = sizeof(rx_info); 905 len = sizeof(rx_info);
930 val = &rx_info; 906 val = &rx_info;
931 break; 907 break;
diff --git a/net/dccp/ccids/ccid3.h b/net/dccp/ccids/ccid3.h
index e5a244143846..032635776653 100644
--- a/net/dccp/ccids/ccid3.h
+++ b/net/dccp/ccids/ccid3.h
@@ -75,44 +75,44 @@ enum ccid3_hc_tx_states {
75 75
76/** 76/**
77 * struct ccid3_hc_tx_sock - CCID3 sender half-connection socket 77 * struct ccid3_hc_tx_sock - CCID3 sender half-connection socket
78 * @ccid3hctx_x - Current sending rate in 64 * bytes per second 78 * @tx_x: Current sending rate in 64 * bytes per second
79 * @ccid3hctx_x_recv - Receive rate in 64 * bytes per second 79 * @tx_x_recv: Receive rate in 64 * bytes per second
80 * @ccid3hctx_x_calc - Calculated rate in bytes per second 80 * @tx_x_calc: Calculated rate in bytes per second
81 * @ccid3hctx_rtt - Estimate of current round trip time in usecs 81 * @tx_rtt: Estimate of current round trip time in usecs
82 * @ccid3hctx_p - Current loss event rate (0-1) scaled by 1000000 82 * @tx_p: Current loss event rate (0-1) scaled by 1000000
83 * @ccid3hctx_s - Packet size in bytes 83 * @tx_s: Packet size in bytes
84 * @ccid3hctx_t_rto - Nofeedback Timer setting in usecs 84 * @tx_t_rto: Nofeedback Timer setting in usecs
85 * @ccid3hctx_t_ipi - Interpacket (send) interval (RFC 3448, 4.6) in usecs 85 * @tx_t_ipi: Interpacket (send) interval (RFC 3448, 4.6) in usecs
86 * @ccid3hctx_state - Sender state, one of %ccid3_hc_tx_states 86 * @tx_state: Sender state, one of %ccid3_hc_tx_states
87 * @ccid3hctx_last_win_count - Last window counter sent 87 * @tx_last_win_count: Last window counter sent
88 * @ccid3hctx_t_last_win_count - Timestamp of earliest packet 88 * @tx_t_last_win_count: Timestamp of earliest packet
89 * with last_win_count value sent 89 * with last_win_count value sent
90 * @ccid3hctx_no_feedback_timer - Handle to no feedback timer 90 * @tx_no_feedback_timer: Handle to no feedback timer
91 * @ccid3hctx_t_ld - Time last doubled during slow start 91 * @tx_t_ld: Time last doubled during slow start
92 * @ccid3hctx_t_nom - Nominal send time of next packet 92 * @tx_t_nom: Nominal send time of next packet
93 * @ccid3hctx_delta - Send timer delta (RFC 3448, 4.6) in usecs 93 * @tx_delta: Send timer delta (RFC 3448, 4.6) in usecs
94 * @ccid3hctx_hist - Packet history 94 * @tx_hist: Packet history
95 * @ccid3hctx_options_received - Parsed set of retrieved options 95 * @tx_options_received: Parsed set of retrieved options
96 */ 96 */
97struct ccid3_hc_tx_sock { 97struct ccid3_hc_tx_sock {
98 struct tfrc_tx_info ccid3hctx_tfrc; 98 struct tfrc_tx_info tx_tfrc;
99#define ccid3hctx_x ccid3hctx_tfrc.tfrctx_x 99#define tx_x tx_tfrc.tfrctx_x
100#define ccid3hctx_x_recv ccid3hctx_tfrc.tfrctx_x_recv 100#define tx_x_recv tx_tfrc.tfrctx_x_recv
101#define ccid3hctx_x_calc ccid3hctx_tfrc.tfrctx_x_calc 101#define tx_x_calc tx_tfrc.tfrctx_x_calc
102#define ccid3hctx_rtt ccid3hctx_tfrc.tfrctx_rtt 102#define tx_rtt tx_tfrc.tfrctx_rtt
103#define ccid3hctx_p ccid3hctx_tfrc.tfrctx_p 103#define tx_p tx_tfrc.tfrctx_p
104#define ccid3hctx_t_rto ccid3hctx_tfrc.tfrctx_rto 104#define tx_t_rto tx_tfrc.tfrctx_rto
105#define ccid3hctx_t_ipi ccid3hctx_tfrc.tfrctx_ipi 105#define tx_t_ipi tx_tfrc.tfrctx_ipi
106 u16 ccid3hctx_s; 106 u16 tx_s;
107 enum ccid3_hc_tx_states ccid3hctx_state:8; 107 enum ccid3_hc_tx_states tx_state:8;
108 u8 ccid3hctx_last_win_count; 108 u8 tx_last_win_count;
109 ktime_t ccid3hctx_t_last_win_count; 109 ktime_t tx_t_last_win_count;
110 struct timer_list ccid3hctx_no_feedback_timer; 110 struct timer_list tx_no_feedback_timer;
111 ktime_t ccid3hctx_t_ld; 111 ktime_t tx_t_ld;
112 ktime_t ccid3hctx_t_nom; 112 ktime_t tx_t_nom;
113 u32 ccid3hctx_delta; 113 u32 tx_delta;
114 struct tfrc_tx_hist_entry *ccid3hctx_hist; 114 struct tfrc_tx_hist_entry *tx_hist;
115 struct ccid3_options_received ccid3hctx_options_received; 115 struct ccid3_options_received tx_options_received;
116}; 116};
117 117
118static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk) 118static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk)
@@ -131,32 +131,32 @@ enum ccid3_hc_rx_states {
131 131
132/** 132/**
133 * struct ccid3_hc_rx_sock - CCID3 receiver half-connection socket 133 * struct ccid3_hc_rx_sock - CCID3 receiver half-connection socket
134 * @ccid3hcrx_x_recv - Receiver estimate of send rate (RFC 3448 4.3) 134 * @rx_x_recv: Receiver estimate of send rate (RFC 3448 4.3)
135 * @ccid3hcrx_rtt - Receiver estimate of rtt (non-standard) 135 * @rx_rtt: Receiver estimate of rtt (non-standard)
136 * @ccid3hcrx_p - Current loss event rate (RFC 3448 5.4) 136 * @rx_p: Current loss event rate (RFC 3448 5.4)
137 * @ccid3hcrx_last_counter - Tracks window counter (RFC 4342, 8.1) 137 * @rx_last_counter: Tracks window counter (RFC 4342, 8.1)
138 * @ccid3hcrx_state - Receiver state, one of %ccid3_hc_rx_states 138 * @rx_state: Receiver state, one of %ccid3_hc_rx_states
139 * @ccid3hcrx_bytes_recv - Total sum of DCCP payload bytes 139 * @rx_bytes_recv: Total sum of DCCP payload bytes
140 * @ccid3hcrx_x_recv - Receiver estimate of send rate (RFC 3448, sec. 4.3) 140 * @rx_x_recv: Receiver estimate of send rate (RFC 3448, sec. 4.3)
141 * @ccid3hcrx_rtt - Receiver estimate of RTT 141 * @rx_rtt: Receiver estimate of RTT
142 * @ccid3hcrx_tstamp_last_feedback - Time at which last feedback was sent 142 * @rx_tstamp_last_feedback: Time at which last feedback was sent
143 * @ccid3hcrx_tstamp_last_ack - Time at which last feedback was sent 143 * @rx_tstamp_last_ack: Time at which last feedback was sent
144 * @ccid3hcrx_hist - Packet history (loss detection + RTT sampling) 144 * @rx_hist: Packet history (loss detection + RTT sampling)
145 * @ccid3hcrx_li_hist - Loss Interval database 145 * @rx_li_hist: Loss Interval database
146 * @ccid3hcrx_s - Received packet size in bytes 146 * @rx_s: Received packet size in bytes
147 * @ccid3hcrx_pinv - Inverse of Loss Event Rate (RFC 4342, sec. 8.5) 147 * @rx_pinv: Inverse of Loss Event Rate (RFC 4342, sec. 8.5)
148 */ 148 */
149struct ccid3_hc_rx_sock { 149struct ccid3_hc_rx_sock {
150 u8 ccid3hcrx_last_counter:4; 150 u8 rx_last_counter:4;
151 enum ccid3_hc_rx_states ccid3hcrx_state:8; 151 enum ccid3_hc_rx_states rx_state:8;
152 u32 ccid3hcrx_bytes_recv; 152 u32 rx_bytes_recv;
153 u32 ccid3hcrx_x_recv; 153 u32 rx_x_recv;
154 u32 ccid3hcrx_rtt; 154 u32 rx_rtt;
155 ktime_t ccid3hcrx_tstamp_last_feedback; 155 ktime_t rx_tstamp_last_feedback;
156 struct tfrc_rx_hist ccid3hcrx_hist; 156 struct tfrc_rx_hist rx_hist;
157 struct tfrc_loss_hist ccid3hcrx_li_hist; 157 struct tfrc_loss_hist rx_li_hist;
158 u16 ccid3hcrx_s; 158 u16 rx_s;
159#define ccid3hcrx_pinv ccid3hcrx_li_hist.i_mean 159#define rx_pinv rx_li_hist.i_mean
160}; 160};
161 161
162static inline struct ccid3_hc_rx_sock *ccid3_hc_rx_sk(const struct sock *sk) 162static inline struct ccid3_hc_rx_sock *ccid3_hc_rx_sk(const struct sock *sk)
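
[Editor's note: the renamed fields above keep the user-visible part of the sender state inside an embedded struct tfrc_tx_info, with #define aliases (tx_x, tx_rtt, ...) giving the rest of the code short member names. The sketch below shows the aliasing technique itself with hypothetical struct and field names; it is not the DCCP structure.]

#include <stdio.h>
#include <stdint.h>

struct demo_tx_info {			/* the part shared with user space */
	uint64_t info_x;
	uint32_t info_rtt;
};

struct demo_tx_sock {
	struct demo_tx_info tx_info;
#define tx_x	tx_info.info_x		/* hc.tx_x expands to hc.tx_info.info_x */
#define tx_rtt	tx_info.info_rtt
	uint16_t tx_s;
};

int main(void)
{
	struct demo_tx_sock hc = { .tx_info = { 0, 0 }, .tx_s = 1460 };

	hc.tx_rtt = 40000;
	hc.tx_x = (uint64_t)hc.tx_s << 6;	/* 64 * bytes/s units */
	printf("rtt=%u x=%llu s=%u\n", hc.tx_rtt,
	       (unsigned long long)hc.tx_x, hc.tx_s);
	return 0;
}

The upside is that a getsockopt() handler can copy &hc.tx_info as one block while internal code still reads naturally; the downside is that the aliases are global preprocessor names, which is part of why the short tx_/rx_ prefixes were chosen.
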
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 972b8dc918d6..df7dd26cf07e 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -22,6 +22,7 @@
22 * 2 of the License, or (at your option) any later version. 22 * 2 of the License, or (at your option) any later version.
23 */ 23 */
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/slab.h>
25#include "ccid.h" 26#include "ccid.h"
26#include "feat.h" 27#include "feat.h"
27 28
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 7648f316310f..9ec717426024 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -12,6 +12,7 @@
12 12
13#include <linux/dccp.h> 13#include <linux/dccp.h>
14#include <linux/skbuff.h> 14#include <linux/skbuff.h>
15#include <linux/slab.h>
15 16
16#include <net/sock.h> 17#include <net/sock.h>
17 18
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 7302e1498d46..52ffa1cde15a 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -12,6 +12,7 @@
12 12
13#include <linux/dccp.h> 13#include <linux/dccp.h>
14#include <linux/icmp.h> 14#include <linux/icmp.h>
15#include <linux/slab.h>
15#include <linux/module.h> 16#include <linux/module.h>
16#include <linux/skbuff.h> 17#include <linux/skbuff.h>
17#include <linux/random.h> 18#include <linux/random.h>
@@ -62,10 +63,10 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
62 nexthop = inet->opt->faddr; 63 nexthop = inet->opt->faddr;
63 } 64 }
64 65
65 tmp = ip_route_connect(&rt, nexthop, inet->saddr, 66 tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr,
66 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, 67 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
67 IPPROTO_DCCP, 68 IPPROTO_DCCP,
68 inet->sport, usin->sin_port, sk, 1); 69 inet->inet_sport, usin->sin_port, sk, 1);
69 if (tmp < 0) 70 if (tmp < 0)
70 return tmp; 71 return tmp;
71 72
@@ -77,12 +78,12 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
77 if (inet->opt == NULL || !inet->opt->srr) 78 if (inet->opt == NULL || !inet->opt->srr)
78 daddr = rt->rt_dst; 79 daddr = rt->rt_dst;
79 80
80 if (inet->saddr == 0) 81 if (inet->inet_saddr == 0)
81 inet->saddr = rt->rt_src; 82 inet->inet_saddr = rt->rt_src;
82 inet->rcv_saddr = inet->saddr; 83 inet->inet_rcv_saddr = inet->inet_saddr;
83 84
84 inet->dport = usin->sin_port; 85 inet->inet_dport = usin->sin_port;
85 inet->daddr = daddr; 86 inet->inet_daddr = daddr;
86 87
87 inet_csk(sk)->icsk_ext_hdr_len = 0; 88 inet_csk(sk)->icsk_ext_hdr_len = 0;
88 if (inet->opt != NULL) 89 if (inet->opt != NULL)
@@ -98,17 +99,19 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
98 if (err != 0) 99 if (err != 0)
99 goto failure; 100 goto failure;
100 101
101 err = ip_route_newports(&rt, IPPROTO_DCCP, inet->sport, inet->dport, 102 err = ip_route_newports(&rt, IPPROTO_DCCP, inet->inet_sport,
102 sk); 103 inet->inet_dport, sk);
103 if (err != 0) 104 if (err != 0)
104 goto failure; 105 goto failure;
105 106
106 /* OK, now commit destination to socket. */ 107 /* OK, now commit destination to socket. */
107 sk_setup_caps(sk, &rt->u.dst); 108 sk_setup_caps(sk, &rt->u.dst);
108 109
109 dp->dccps_iss = secure_dccp_sequence_number(inet->saddr, inet->daddr, 110 dp->dccps_iss = secure_dccp_sequence_number(inet->inet_saddr,
110 inet->sport, inet->dport); 111 inet->inet_daddr,
111 inet->id = dp->dccps_iss ^ jiffies; 112 inet->inet_sport,
113 inet->inet_dport);
114 inet->inet_id = dp->dccps_iss ^ jiffies;
112 115
113 err = dccp_connect(sk); 116 err = dccp_connect(sk);
114 rt = NULL; 117 rt = NULL;
@@ -123,7 +126,7 @@ failure:
123 dccp_set_state(sk, DCCP_CLOSED); 126 dccp_set_state(sk, DCCP_CLOSED);
124 ip_rt_put(rt); 127 ip_rt_put(rt);
125 sk->sk_route_caps = 0; 128 sk->sk_route_caps = 0;
126 inet->dport = 0; 129 inet->inet_dport = 0;
127 goto out; 130 goto out;
128} 131}
129 132
@@ -352,7 +355,9 @@ void dccp_v4_send_check(struct sock *sk, int unused, struct sk_buff *skb)
352 struct dccp_hdr *dh = dccp_hdr(skb); 355 struct dccp_hdr *dh = dccp_hdr(skb);
353 356
354 dccp_csum_outgoing(skb); 357 dccp_csum_outgoing(skb);
355 dh->dccph_checksum = dccp_v4_csum_finish(skb, inet->saddr, inet->daddr); 358 dh->dccph_checksum = dccp_v4_csum_finish(skb,
359 inet->inet_saddr,
360 inet->inet_daddr);
356} 361}
357 362
358EXPORT_SYMBOL_GPL(dccp_v4_send_check); 363EXPORT_SYMBOL_GPL(dccp_v4_send_check);
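
[Editor's note: dccp_v4_send_check() above finishes the checksum over the segment plus an IPv4 pseudo-header built from the (renamed) inet_saddr/inet_daddr fields. The sketch below is a plain user-space illustration of that ones'-complement sum (RFC 1071) over a pseudo-header and payload, not the kernel's optimized csum helpers; addresses and payload are made up.]

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

static uint32_t csum_add(uint32_t sum, const void *data, size_t len)
{
	const uint8_t *p = data;

	while (len > 1) {
		sum += (uint32_t)p[0] << 8 | p[1];
		p += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte, padded with zero */
		sum += (uint32_t)p[0] << 8;
	return sum;
}

static uint16_t csum_finish(uint32_t sum)
{
	while (sum >> 16)		/* fold carries back into 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint32_t saddr = htonl(0xc0a80001), daddr = htonl(0xc0a80002);
	uint8_t segment[] = "example dccp segment";
	uint16_t len = htons(sizeof(segment));
	uint8_t pseudo[12] = { 0 };	/* src, dst, zero, proto, length */
	uint32_t sum = 0;

	memcpy(pseudo, &saddr, 4);
	memcpy(pseudo + 4, &daddr, 4);
	pseudo[9] = 33;			/* IPPROTO_DCCP */
	memcpy(pseudo + 10, &len, 2);

	sum = csum_add(sum, pseudo, sizeof(pseudo));
	sum = csum_add(sum, segment, sizeof(segment));
	printf("checksum=0x%04x\n", csum_finish(sum));
	return 0;
}
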
@@ -393,18 +398,18 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
393 398
394 newinet = inet_sk(newsk); 399 newinet = inet_sk(newsk);
395 ireq = inet_rsk(req); 400 ireq = inet_rsk(req);
396 newinet->daddr = ireq->rmt_addr; 401 newinet->inet_daddr = ireq->rmt_addr;
397 newinet->rcv_saddr = ireq->loc_addr; 402 newinet->inet_rcv_saddr = ireq->loc_addr;
398 newinet->saddr = ireq->loc_addr; 403 newinet->inet_saddr = ireq->loc_addr;
399 newinet->opt = ireq->opt; 404 newinet->opt = ireq->opt;
400 ireq->opt = NULL; 405 ireq->opt = NULL;
401 newinet->mc_index = inet_iif(skb); 406 newinet->mc_index = inet_iif(skb);
402 newinet->mc_ttl = ip_hdr(skb)->ttl; 407 newinet->mc_ttl = ip_hdr(skb)->ttl;
403 newinet->id = jiffies; 408 newinet->inet_id = jiffies;
404 409
405 dccp_sync_mss(newsk, dst_mtu(dst)); 410 dccp_sync_mss(newsk, dst_mtu(dst));
406 411
407 __inet_hash_nolisten(newsk); 412 __inet_hash_nolisten(newsk, NULL);
408 __inet_inherit_port(sk, newsk); 413 __inet_inherit_port(sk, newsk);
409 414
410 return newsk; 415 return newsk;
@@ -473,7 +478,8 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
473 return &rt->u.dst; 478 return &rt->u.dst;
474} 479}
475 480
476static int dccp_v4_send_response(struct sock *sk, struct request_sock *req) 481static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
482 struct request_values *rv_unused)
477{ 483{
478 int err = -1; 484 int err = -1;
479 struct sk_buff *skb; 485 struct sk_buff *skb;
@@ -622,7 +628,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
622 dreq->dreq_iss = dccp_v4_init_sequence(skb); 628 dreq->dreq_iss = dccp_v4_init_sequence(skb);
623 dreq->dreq_service = service; 629 dreq->dreq_service = service;
624 630
625 if (dccp_v4_send_response(sk, req)) 631 if (dccp_v4_send_response(sk, req, NULL))
626 goto drop_and_free; 632 goto drop_and_free;
627 633
628 inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); 634 inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
@@ -987,21 +993,20 @@ static struct inet_protosw dccp_v4_protosw = {
987 .protocol = IPPROTO_DCCP, 993 .protocol = IPPROTO_DCCP,
988 .prot = &dccp_v4_prot, 994 .prot = &dccp_v4_prot,
989 .ops = &inet_dccp_ops, 995 .ops = &inet_dccp_ops,
990 .capability = -1,
991 .no_check = 0, 996 .no_check = 0,
992 .flags = INET_PROTOSW_ICSK, 997 .flags = INET_PROTOSW_ICSK,
993}; 998};
994 999
995static int dccp_v4_init_net(struct net *net) 1000static int __net_init dccp_v4_init_net(struct net *net)
996{ 1001{
997 int err; 1002 if (dccp_hashinfo.bhash == NULL)
1003 return -ESOCKTNOSUPPORT;
998 1004
999 err = inet_ctl_sock_create(&net->dccp.v4_ctl_sk, PF_INET, 1005 return inet_ctl_sock_create(&net->dccp.v4_ctl_sk, PF_INET,
1000 SOCK_DCCP, IPPROTO_DCCP, net); 1006 SOCK_DCCP, IPPROTO_DCCP, net);
1001 return err;
1002} 1007}
1003 1008
1004static void dccp_v4_exit_net(struct net *net) 1009static void __net_exit dccp_v4_exit_net(struct net *net)
1005{ 1010{
1006 inet_ctl_sock_destroy(net->dccp.v4_ctl_sk); 1011 inet_ctl_sock_destroy(net->dccp.v4_ctl_sk);
1007} 1012}
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index e48ca5d45658..3b11e41a2929 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -14,6 +14,7 @@
14 14
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/random.h> 16#include <linux/random.h>
17#include <linux/slab.h>
17#include <linux/xfrm.h> 18#include <linux/xfrm.h>
18 19
19#include <net/addrconf.h> 20#include <net/addrconf.h>
@@ -46,7 +47,7 @@ static void dccp_v6_hash(struct sock *sk)
46 return; 47 return;
47 } 48 }
48 local_bh_disable(); 49 local_bh_disable();
49 __inet6_hash(sk); 50 __inet6_hash(sk, NULL);
50 local_bh_enable(); 51 local_bh_enable();
51 } 52 }
52} 53}
@@ -158,8 +159,8 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
158 ipv6_addr_copy(&fl.fl6_dst, &np->daddr); 159 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
159 ipv6_addr_copy(&fl.fl6_src, &np->saddr); 160 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
160 fl.oif = sk->sk_bound_dev_if; 161 fl.oif = sk->sk_bound_dev_if;
161 fl.fl_ip_dport = inet->dport; 162 fl.fl_ip_dport = inet->inet_dport;
162 fl.fl_ip_sport = inet->sport; 163 fl.fl_ip_sport = inet->inet_sport;
163 security_sk_classify_flow(sk, &fl); 164 security_sk_classify_flow(sk, &fl);
164 165
165 err = ip6_dst_lookup(sk, &dst, &fl); 166 err = ip6_dst_lookup(sk, &dst, &fl);
@@ -241,7 +242,8 @@ out:
241} 242}
242 243
243 244
244static int dccp_v6_send_response(struct sock *sk, struct request_sock *req) 245static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
246 struct request_values *rv_unused)
245{ 247{
246 struct inet6_request_sock *ireq6 = inet6_rsk(req); 248 struct inet6_request_sock *ireq6 = inet6_rsk(req);
247 struct ipv6_pinfo *np = inet6_sk(sk); 249 struct ipv6_pinfo *np = inet6_sk(sk);
@@ -468,7 +470,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
468 dreq->dreq_iss = dccp_v6_init_sequence(skb); 470 dreq->dreq_iss = dccp_v6_init_sequence(skb);
469 dreq->dreq_service = service; 471 dreq->dreq_service = service;
470 472
471 if (dccp_v6_send_response(sk, req)) 473 if (dccp_v6_send_response(sk, req, NULL))
472 goto drop_and_free; 474 goto drop_and_free;
473 475
474 inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); 476 inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
@@ -510,11 +512,9 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
510 512
511 memcpy(newnp, np, sizeof(struct ipv6_pinfo)); 513 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
512 514
513 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF), 515 ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
514 newinet->daddr);
515 516
516 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF), 517 ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
517 newinet->saddr);
518 518
519 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr); 519 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
520 520
@@ -642,9 +642,10 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
642 642
643 dccp_sync_mss(newsk, dst_mtu(dst)); 643 dccp_sync_mss(newsk, dst_mtu(dst));
644 644
645 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6; 645 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
646 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
646 647
647 __inet6_hash(newsk); 648 __inet6_hash(newsk, NULL);
648 __inet_inherit_port(sk, newsk); 649 __inet_inherit_port(sk, newsk);
649 650
650 return newsk; 651 return newsk;
@@ -970,12 +971,9 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
970 icsk->icsk_af_ops = &dccp_ipv6_af_ops; 971 icsk->icsk_af_ops = &dccp_ipv6_af_ops;
971 sk->sk_backlog_rcv = dccp_v6_do_rcv; 972 sk->sk_backlog_rcv = dccp_v6_do_rcv;
972 goto failure; 973 goto failure;
973 } else {
974 ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
975 inet->saddr);
976 ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
977 inet->rcv_saddr);
978 } 974 }
975 ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
976 ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &np->rcv_saddr);
979 977
980 return err; 978 return err;
981 } 979 }
@@ -988,7 +986,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
988 ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr); 986 ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr);
989 fl.oif = sk->sk_bound_dev_if; 987 fl.oif = sk->sk_bound_dev_if;
990 fl.fl_ip_dport = usin->sin6_port; 988 fl.fl_ip_dport = usin->sin6_port;
991 fl.fl_ip_sport = inet->sport; 989 fl.fl_ip_sport = inet->inet_sport;
992 security_sk_classify_flow(sk, &fl); 990 security_sk_classify_flow(sk, &fl);
993 991
994 if (np->opt != NULL && np->opt->srcrt != NULL) { 992 if (np->opt != NULL && np->opt->srcrt != NULL) {
@@ -1021,7 +1019,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1021 1019
1022 /* set the source address */ 1020 /* set the source address */
1023 ipv6_addr_copy(&np->saddr, saddr); 1021 ipv6_addr_copy(&np->saddr, saddr);
1024 inet->rcv_saddr = LOOPBACK4_IPV6; 1022 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
1025 1023
1026 __ip6_dst_store(sk, dst, NULL, NULL); 1024 __ip6_dst_store(sk, dst, NULL, NULL);
1027 1025
@@ -1030,7 +1028,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1030 icsk->icsk_ext_hdr_len = (np->opt->opt_flen + 1028 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
1031 np->opt->opt_nflen); 1029 np->opt->opt_nflen);
1032 1030
1033 inet->dport = usin->sin6_port; 1031 inet->inet_dport = usin->sin6_port;
1034 1032
1035 dccp_set_state(sk, DCCP_REQUESTING); 1033 dccp_set_state(sk, DCCP_REQUESTING);
1036 err = inet6_hash_connect(&dccp_death_row, sk); 1034 err = inet6_hash_connect(&dccp_death_row, sk);
@@ -1039,7 +1037,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1039 1037
1040 dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32, 1038 dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
1041 np->daddr.s6_addr32, 1039 np->daddr.s6_addr32,
1042 inet->sport, inet->dport); 1040 inet->inet_sport,
1041 inet->inet_dport);
1043 err = dccp_connect(sk); 1042 err = dccp_connect(sk);
1044 if (err) 1043 if (err)
1045 goto late_failure; 1044 goto late_failure;
@@ -1050,7 +1049,7 @@ late_failure:
1050 dccp_set_state(sk, DCCP_CLOSED); 1049 dccp_set_state(sk, DCCP_CLOSED);
1051 __sk_dst_reset(sk); 1050 __sk_dst_reset(sk);
1052failure: 1051failure:
1053 inet->dport = 0; 1052 inet->inet_dport = 0;
1054 sk->sk_route_caps = 0; 1053 sk->sk_route_caps = 0;
1055 return err; 1054 return err;
1056} 1055}
@@ -1188,20 +1187,19 @@ static struct inet_protosw dccp_v6_protosw = {
1188 .protocol = IPPROTO_DCCP, 1187 .protocol = IPPROTO_DCCP,
1189 .prot = &dccp_v6_prot, 1188 .prot = &dccp_v6_prot,
1190 .ops = &inet6_dccp_ops, 1189 .ops = &inet6_dccp_ops,
1191 .capability = -1,
1192 .flags = INET_PROTOSW_ICSK, 1190 .flags = INET_PROTOSW_ICSK,
1193}; 1191};
1194 1192
1195static int dccp_v6_init_net(struct net *net) 1193static int __net_init dccp_v6_init_net(struct net *net)
1196{ 1194{
1197 int err; 1195 if (dccp_hashinfo.bhash == NULL)
1196 return -ESOCKTNOSUPPORT;
1198 1197
1199 err = inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6, 1198 return inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6,
1200 SOCK_DCCP, IPPROTO_DCCP, net); 1199 SOCK_DCCP, IPPROTO_DCCP, net);
1201 return err;
1202} 1200}
1203 1201
1204static void dccp_v6_exit_net(struct net *net) 1202static void __net_exit dccp_v6_exit_net(struct net *net)
1205{ 1203{
1206 inet_ctl_sock_destroy(net->dccp.v6_ctl_sk); 1204 inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
1207} 1205}
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 5ca49cec95f5..128b089d3aef 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/dccp.h> 13#include <linux/dccp.h>
14#include <linux/gfp.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/skbuff.h> 16#include <linux/skbuff.h>
16#include <linux/timer.h> 17#include <linux/timer.h>
@@ -184,7 +185,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
184 * counter (backoff, monitored by dccp_response_timer). 185 * counter (backoff, monitored by dccp_response_timer).
185 */ 186 */
186 req->retrans++; 187 req->retrans++;
187 req->rsk_ops->rtx_syn_ack(sk, req); 188 req->rsk_ops->rtx_syn_ack(sk, req, NULL);
188 } 189 }
189 /* Network Duplicate, discard packet */ 190 /* Network Duplicate, discard packet */
190 return NULL; 191 return NULL;
@@ -254,7 +255,7 @@ int dccp_child_process(struct sock *parent, struct sock *child,
254 * in main socket hash table and lock on listening 255 * in main socket hash table and lock on listening
255 * socket does not protect us more. 256 * socket does not protect us more.
256 */ 257 */
257 sk_add_backlog(child, skb); 258 __sk_add_backlog(child, skb);
258 } 259 }
259 260
260 bh_unlock_sock(child); 261 bh_unlock_sock(child);
diff --git a/net/dccp/output.c b/net/dccp/output.c
index c96119fda688..fc3f436440b4 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -13,6 +13,7 @@
13#include <linux/dccp.h> 13#include <linux/dccp.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/skbuff.h> 15#include <linux/skbuff.h>
16#include <linux/slab.h>
16 17
17#include <net/inet_sock.h> 18#include <net/inet_sock.h>
18#include <net/sock.h> 19#include <net/sock.h>
@@ -99,8 +100,8 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
99 /* Build DCCP header and checksum it. */ 100 /* Build DCCP header and checksum it. */
100 dh = dccp_zeroed_hdr(skb, dccp_header_size); 101 dh = dccp_zeroed_hdr(skb, dccp_header_size);
101 dh->dccph_type = dcb->dccpd_type; 102 dh->dccph_type = dcb->dccpd_type;
102 dh->dccph_sport = inet->sport; 103 dh->dccph_sport = inet->inet_sport;
103 dh->dccph_dport = inet->dport; 104 dh->dccph_dport = inet->inet_dport;
104 dh->dccph_doff = (dccp_header_size + dcb->dccpd_opt_len) / 4; 105 dh->dccph_doff = (dccp_header_size + dcb->dccpd_opt_len) / 4;
105 dh->dccph_ccval = dcb->dccpd_ccval; 106 dh->dccph_ccval = dcb->dccpd_ccval;
106 dh->dccph_cscov = dp->dccps_pcslen; 107 dh->dccph_cscov = dp->dccps_pcslen;
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index 37731da41481..078e48d442fd 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -30,6 +30,7 @@
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/kfifo.h> 31#include <linux/kfifo.h>
32#include <linux/vmalloc.h> 32#include <linux/vmalloc.h>
33#include <linux/gfp.h>
33#include <net/net_namespace.h> 34#include <net/net_namespace.h>
34 35
35#include "dccp.h" 36#include "dccp.h"
@@ -43,7 +44,7 @@ static int bufsize = 64 * 1024;
43static const char procname[] = "dccpprobe"; 44static const char procname[] = "dccpprobe";
44 45
45static struct { 46static struct {
46 struct kfifo *fifo; 47 struct kfifo fifo;
47 spinlock_t lock; 48 spinlock_t lock;
48 wait_queue_head_t wait; 49 wait_queue_head_t wait;
49 struct timespec tstart; 50 struct timespec tstart;
@@ -67,7 +68,7 @@ static void printl(const char *fmt, ...)
67 len += vscnprintf(tbuf+len, sizeof(tbuf)-len, fmt, args); 68 len += vscnprintf(tbuf+len, sizeof(tbuf)-len, fmt, args);
68 va_end(args); 69 va_end(args);
69 70
70 kfifo_put(dccpw.fifo, tbuf, len); 71 kfifo_in_locked(&dccpw.fifo, tbuf, len, &dccpw.lock);
71 wake_up(&dccpw.wait); 72 wake_up(&dccpw.wait);
72} 73}
73 74
@@ -75,26 +76,25 @@ static int jdccp_sendmsg(struct kiocb *iocb, struct sock *sk,
75 struct msghdr *msg, size_t size) 76 struct msghdr *msg, size_t size)
76{ 77{
77 const struct inet_sock *inet = inet_sk(sk); 78 const struct inet_sock *inet = inet_sk(sk);
78 struct ccid3_hc_tx_sock *hctx = NULL; 79 struct ccid3_hc_tx_sock *hc = NULL;
79 80
80 if (ccid_get_current_tx_ccid(dccp_sk(sk)) == DCCPC_CCID3) 81 if (ccid_get_current_tx_ccid(dccp_sk(sk)) == DCCPC_CCID3)
81 hctx = ccid3_hc_tx_sk(sk); 82 hc = ccid3_hc_tx_sk(sk);
82 83
83 if (port == 0 || ntohs(inet->dport) == port || 84 if (port == 0 || ntohs(inet->inet_dport) == port ||
84 ntohs(inet->sport) == port) { 85 ntohs(inet->inet_sport) == port) {
85 if (hctx) 86 if (hc)
86 printl("%pI4:%u %pI4:%u %d %d %d %d %u " 87 printl("%pI4:%u %pI4:%u %d %d %d %d %u %llu %llu %d\n",
87 "%llu %llu %d\n", 88 &inet->inet_saddr, ntohs(inet->inet_sport),
88 &inet->saddr, ntohs(inet->sport), 89 &inet->inet_daddr, ntohs(inet->inet_dport), size,
89 &inet->daddr, ntohs(inet->dport), size, 90 hc->tx_s, hc->tx_rtt, hc->tx_p,
90 hctx->ccid3hctx_s, hctx->ccid3hctx_rtt, 91 hc->tx_x_calc, hc->tx_x_recv >> 6,
91 hctx->ccid3hctx_p, hctx->ccid3hctx_x_calc, 92 hc->tx_x >> 6, hc->tx_t_ipi);
92 hctx->ccid3hctx_x_recv >> 6,
93 hctx->ccid3hctx_x >> 6, hctx->ccid3hctx_t_ipi);
94 else 93 else
95 printl("%pI4:%u %pI4:%u %d\n", 94 printl("%pI4:%u %pI4:%u %d\n",
96 &inet->saddr, ntohs(inet->sport), 95 &inet->inet_saddr, ntohs(inet->inet_sport),
97 &inet->daddr, ntohs(inet->dport), size); 96 &inet->inet_daddr, ntohs(inet->inet_dport),
97 size);
98 } 98 }
99 99
100 jprobe_return(); 100 jprobe_return();
@@ -110,7 +110,7 @@ static struct jprobe dccp_send_probe = {
110 110
111static int dccpprobe_open(struct inode *inode, struct file *file) 111static int dccpprobe_open(struct inode *inode, struct file *file)
112{ 112{
113 kfifo_reset(dccpw.fifo); 113 kfifo_reset(&dccpw.fifo);
114 getnstimeofday(&dccpw.tstart); 114 getnstimeofday(&dccpw.tstart);
115 return 0; 115 return 0;
116} 116}
@@ -132,11 +132,11 @@ static ssize_t dccpprobe_read(struct file *file, char __user *buf,
132 return -ENOMEM; 132 return -ENOMEM;
133 133
134 error = wait_event_interruptible(dccpw.wait, 134 error = wait_event_interruptible(dccpw.wait,
135 __kfifo_len(dccpw.fifo) != 0); 135 kfifo_len(&dccpw.fifo) != 0);
136 if (error) 136 if (error)
137 goto out_free; 137 goto out_free;
138 138
139 cnt = kfifo_get(dccpw.fifo, tbuf, len); 139 cnt = kfifo_out_locked(&dccpw.fifo, tbuf, len, &dccpw.lock);
140 error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0; 140 error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0;
141 141
142out_free: 142out_free:
@@ -157,14 +157,13 @@ static __init int dccpprobe_init(void)
157 157
158 init_waitqueue_head(&dccpw.wait); 158 init_waitqueue_head(&dccpw.wait);
159 spin_lock_init(&dccpw.lock); 159 spin_lock_init(&dccpw.lock);
160 dccpw.fifo = kfifo_alloc(bufsize, GFP_KERNEL, &dccpw.lock); 160 if (kfifo_alloc(&dccpw.fifo, bufsize, GFP_KERNEL))
161 if (IS_ERR(dccpw.fifo)) 161 return ret;
162 return PTR_ERR(dccpw.fifo);
163
164 if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops)) 162 if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
165 goto err0; 163 goto err0;
166 164
167 ret = register_jprobe(&dccp_send_probe); 165 try_then_request_module((ret = register_jprobe(&dccp_send_probe)) == 0,
166 "dccp");
168 if (ret) 167 if (ret)
169 goto err1; 168 goto err1;
170 169
@@ -173,14 +172,14 @@ static __init int dccpprobe_init(void)
173err1: 172err1:
174 proc_net_remove(&init_net, procname); 173 proc_net_remove(&init_net, procname);
175err0: 174err0:
176 kfifo_free(dccpw.fifo); 175 kfifo_free(&dccpw.fifo);
177 return ret; 176 return ret;
178} 177}
179module_init(dccpprobe_init); 178module_init(dccpprobe_init);
180 179
181static __exit void dccpprobe_exit(void) 180static __exit void dccpprobe_exit(void)
182{ 181{
183 kfifo_free(dccpw.fifo); 182 kfifo_free(&dccpw.fifo);
184 proc_net_remove(&init_net, procname); 183 proc_net_remove(&init_net, procname);
185 unregister_jprobe(&dccp_send_probe); 184 unregister_jprobe(&dccp_send_probe);
186 185
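
[Editor's note: the probe.c changes above move from a pointer-to-kfifo with an internal spinlock to an embedded fifo with caller-supplied locking (kfifo_in_locked/kfifo_out_locked). The user-space sketch below mimics that producer/consumer shape with a mutex-protected byte FIFO; names, capacity and the drop-on-full policy are all illustrative, not the kfifo API.]

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define FIFO_SIZE 1024			/* must be a power of two */

struct byte_fifo {
	unsigned char buf[FIFO_SIZE];
	unsigned int in, out;		/* free-running indices */
	pthread_mutex_t lock;
};

static unsigned int fifo_in_locked(struct byte_fifo *f, const void *src,
				   unsigned int len)
{
	pthread_mutex_lock(&f->lock);
	unsigned int space = FIFO_SIZE - (f->in - f->out);
	if (len > space)
		len = space;		/* drop what does not fit */
	for (unsigned int i = 0; i < len; i++)
		f->buf[(f->in + i) & (FIFO_SIZE - 1)] =
			((const unsigned char *)src)[i];
	f->in += len;
	pthread_mutex_unlock(&f->lock);
	return len;
}

static unsigned int fifo_out_locked(struct byte_fifo *f, void *dst,
				    unsigned int len)
{
	pthread_mutex_lock(&f->lock);
	unsigned int avail = f->in - f->out;
	if (len > avail)
		len = avail;
	for (unsigned int i = 0; i < len; i++)
		((unsigned char *)dst)[i] = f->buf[(f->out + i) & (FIFO_SIZE - 1)];
	f->out += len;
	pthread_mutex_unlock(&f->lock);
	return len;
}

int main(void)
{
	struct byte_fifo f = { .in = 0, .out = 0,
			       .lock = PTHREAD_MUTEX_INITIALIZER };
	const char *msg = "192.0.2.1:5001 192.0.2.2:5001 1460\n";
	char line[64];

	fifo_in_locked(&f, msg, (unsigned int)strlen(msg));	/* printl() side */
	unsigned int n = fifo_out_locked(&f, line, sizeof(line) - 1);	/* read() side */
	line[n] = '\0';
	printf("%s", line);
	return 0;
}

(Compile with -pthread.) Embedding the fifo and passing the lock explicitly is what lets the probe reset and free it without juggling a separately allocated object.
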
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index a156319fd0ac..a0e38d8018f5 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -20,6 +20,7 @@
20#include <linux/if_arp.h> 20#include <linux/if_arp.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/random.h> 22#include <linux/random.h>
23#include <linux/slab.h>
23#include <net/checksum.h> 24#include <net/checksum.h>
24 25
25#include <net/inet_sock.h> 26#include <net/inet_sock.h>
@@ -278,7 +279,7 @@ int dccp_disconnect(struct sock *sk, int flags)
278 sk->sk_send_head = NULL; 279 sk->sk_send_head = NULL;
279 } 280 }
280 281
281 inet->dport = 0; 282 inet->inet_dport = 0;
282 283
283 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) 284 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
284 inet_reset_saddr(sk); 285 inet_reset_saddr(sk);
@@ -290,7 +291,7 @@ int dccp_disconnect(struct sock *sk, int flags)
290 inet_csk_delack_init(sk); 291 inet_csk_delack_init(sk);
291 __sk_dst_reset(sk); 292 __sk_dst_reset(sk);
292 293
293 WARN_ON(inet->num && !icsk->icsk_bind_hash); 294 WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
294 295
295 sk->sk_error_report(sk); 296 sk->sk_error_report(sk);
296 return err; 297 return err;
@@ -835,6 +836,8 @@ verify_sock_status:
835 len = -EFAULT; 836 len = -EFAULT;
836 break; 837 break;
837 } 838 }
839 if (flags & MSG_TRUNC)
840 len = skb->len;
838 found_fin_ok: 841 found_fin_ok:
839 if (!(flags & MSG_PEEK)) 842 if (!(flags & MSG_PEEK))
840 sk_eat_skb(sk, skb, 0); 843 sk_eat_skb(sk, skb, 0);
@@ -1003,12 +1006,13 @@ EXPORT_SYMBOL_GPL(dccp_shutdown);
1003 1006
1004static inline int dccp_mib_init(void) 1007static inline int dccp_mib_init(void)
1005{ 1008{
1006 return snmp_mib_init((void**)dccp_statistics, sizeof(struct dccp_mib)); 1009 return snmp_mib_init((void __percpu **)dccp_statistics,
1010 sizeof(struct dccp_mib));
1007} 1011}
1008 1012
1009static inline void dccp_mib_exit(void) 1013static inline void dccp_mib_exit(void)
1010{ 1014{
1011 snmp_mib_free((void**)dccp_statistics); 1015 snmp_mib_free((void __percpu **)dccp_statistics);
1012} 1016}
1013 1017
1014static int thash_entries; 1018static int thash_entries;
@@ -1033,7 +1037,7 @@ static int __init dccp_init(void)
1033 FIELD_SIZEOF(struct sk_buff, cb)); 1037 FIELD_SIZEOF(struct sk_buff, cb));
1034 rc = percpu_counter_init(&dccp_orphan_count, 0); 1038 rc = percpu_counter_init(&dccp_orphan_count, 0);
1035 if (rc) 1039 if (rc)
1036 goto out; 1040 goto out_fail;
1037 rc = -ENOBUFS; 1041 rc = -ENOBUFS;
1038 inet_hashinfo_init(&dccp_hashinfo); 1042 inet_hashinfo_init(&dccp_hashinfo);
1039 dccp_hashinfo.bind_bucket_cachep = 1043 dccp_hashinfo.bind_bucket_cachep =
@@ -1060,11 +1064,12 @@ static int __init dccp_init(void)
1060 for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++) 1064 for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
1061 ; 1065 ;
1062 do { 1066 do {
1063 dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE / 1067 unsigned long hash_size = (1UL << ehash_order) * PAGE_SIZE /
1064 sizeof(struct inet_ehash_bucket); 1068 sizeof(struct inet_ehash_bucket);
1065 while (dccp_hashinfo.ehash_size & 1069
1066 (dccp_hashinfo.ehash_size - 1)) 1070 while (hash_size & (hash_size - 1))
1067 dccp_hashinfo.ehash_size--; 1071 hash_size--;
1072 dccp_hashinfo.ehash_mask = hash_size - 1;
1068 dccp_hashinfo.ehash = (struct inet_ehash_bucket *) 1073 dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
1069 __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order); 1074 __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order);
1070 } while (!dccp_hashinfo.ehash && --ehash_order > 0); 1075 } while (!dccp_hashinfo.ehash && --ehash_order > 0);
@@ -1074,7 +1079,7 @@ static int __init dccp_init(void)
1074 goto out_free_bind_bucket_cachep; 1079 goto out_free_bind_bucket_cachep;
1075 } 1080 }
1076 1081
1077 for (i = 0; i < dccp_hashinfo.ehash_size; i++) { 1082 for (i = 0; i <= dccp_hashinfo.ehash_mask; i++) {
1078 INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i); 1083 INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);
1079 INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].twchain, i); 1084 INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].twchain, i);
1080 } 1085 }
@@ -1121,8 +1126,9 @@ static int __init dccp_init(void)
1121 goto out_sysctl_exit; 1126 goto out_sysctl_exit;
1122 1127
1123 dccp_timestamping_init(); 1128 dccp_timestamping_init();
1124out: 1129
1125 return rc; 1130 return 0;
1131
1126out_sysctl_exit: 1132out_sysctl_exit:
1127 dccp_sysctl_exit(); 1133 dccp_sysctl_exit();
1128out_ackvec_exit: 1134out_ackvec_exit:
@@ -1131,18 +1137,19 @@ out_free_dccp_mib:
1131 dccp_mib_exit(); 1137 dccp_mib_exit();
1132out_free_dccp_bhash: 1138out_free_dccp_bhash:
1133 free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order); 1139 free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
1134 dccp_hashinfo.bhash = NULL;
1135out_free_dccp_locks: 1140out_free_dccp_locks:
1136 inet_ehash_locks_free(&dccp_hashinfo); 1141 inet_ehash_locks_free(&dccp_hashinfo);
1137out_free_dccp_ehash: 1142out_free_dccp_ehash:
1138 free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order); 1143 free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
1139 dccp_hashinfo.ehash = NULL;
1140out_free_bind_bucket_cachep: 1144out_free_bind_bucket_cachep:
1141 kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); 1145 kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
1142 dccp_hashinfo.bind_bucket_cachep = NULL;
1143out_free_percpu: 1146out_free_percpu:
1144 percpu_counter_destroy(&dccp_orphan_count); 1147 percpu_counter_destroy(&dccp_orphan_count);
1145 goto out; 1148out_fail:
1149 dccp_hashinfo.bhash = NULL;
1150 dccp_hashinfo.ehash = NULL;
1151 dccp_hashinfo.bind_bucket_cachep = NULL;
1152 return rc;
1146} 1153}
1147 1154
1148static void __exit dccp_fini(void) 1155static void __exit dccp_fini(void)
@@ -1153,7 +1160,7 @@ static void __exit dccp_fini(void)
1153 get_order(dccp_hashinfo.bhash_size * 1160 get_order(dccp_hashinfo.bhash_size *
1154 sizeof(struct inet_bind_hashbucket))); 1161 sizeof(struct inet_bind_hashbucket)));
1155 free_pages((unsigned long)dccp_hashinfo.ehash, 1162 free_pages((unsigned long)dccp_hashinfo.ehash,
1156 get_order(dccp_hashinfo.ehash_size * 1163 get_order((dccp_hashinfo.ehash_mask + 1) *
1157 sizeof(struct inet_ehash_bucket))); 1164 sizeof(struct inet_ehash_bucket)));
1158 inet_ehash_locks_free(&dccp_hashinfo); 1165 inet_ehash_locks_free(&dccp_hashinfo);
1159 kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); 1166 kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
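Editor's note: the dccp_init()/dccp_fini() hunks above follow the inet hash table conversion from ehash_size to ehash_mask. The table is kept at a power-of-two number of buckets and only the mask is stored, so a lookup can select a bucket with a bitwise AND, the init loop runs with i <= ehash_mask, and the free path recovers the byte count as (ehash_mask + 1) * sizeof(struct inet_ehash_bucket). A standalone sketch of that sizing logic, outside the kernel and with illustrative names:

#include <stdio.h>

/* mask_from_goal() is illustrative; the kernel derives the goal from
 * the page order chosen in dccp_init(). */
static unsigned long mask_from_goal(unsigned long goal)
{
	unsigned long size = goal;

	/* Decrement until only one bit is set, i.e. a power of two. */
	while (size & (size - 1))
		size--;

	return size - 1;
}

int main(void)
{
	unsigned long mask = mask_from_goal(3000);

	/* 3000 buckets requested -> 2048 kept, mask 0x7ff; the table is
	 * then walked with "for (i = 0; i <= mask; i++)" and a lookup
	 * picks a bucket with "hash & mask". */
	printf("buckets=%lu mask=%#lx\n", mask + 1, mask);
	return 0;
}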
diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c
index a5a1856234e7..563943822e58 100644
--- a/net/dccp/sysctl.c
+++ b/net/dccp/sysctl.c
@@ -93,13 +93,13 @@ static struct ctl_table dccp_default_table[] = {
93 .proc_handler = proc_dointvec_ms_jiffies, 93 .proc_handler = proc_dointvec_ms_jiffies,
94 }, 94 },
95 95
96 { .ctl_name = 0, } 96 { }
97}; 97};
98 98
99static struct ctl_path dccp_path[] = { 99static struct ctl_path dccp_path[] = {
100 { .procname = "net", .ctl_name = CTL_NET, }, 100 { .procname = "net", },
101 { .procname = "dccp", .ctl_name = NET_DCCP, }, 101 { .procname = "dccp", },
102 { .procname = "default", .ctl_name = NET_DCCP_DEFAULT, }, 102 { .procname = "default", },
103 { } 103 { }
104}; 104};
105 105
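Editor's note: the sysctl.c hunk above reflects the retirement of binary sysctl numbers: ctl_table entries and ctl_path components are matched by .procname alone, and the terminating entry no longer needs .ctl_name = 0. A minimal sketch of a procname-only registration in the same style; the knob name and values are illustrative, not DCCP's.

#include <linux/init.h>
#include <linux/sysctl.h>

static int example_value;	/* hypothetical knob, not a DCCP sysctl */

static struct ctl_table example_table[] = {
	{
		.procname	= "example",
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }	/* empty terminator; no ".ctl_name = 0" needed any more */
};

static struct ctl_path example_path[] = {
	{ .procname = "net",     },	/* no CTL_NET */
	{ .procname = "example", },	/* no binary number at all */
	{ }
};

static struct ctl_table_header *example_header;

static int __init example_sysctl_register(void)
{
	example_header = register_sysctl_paths(example_path, example_table);
	return example_header != NULL ? 0 : -ENOMEM;
}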
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 162d1e683c39..bbfeb5eae46a 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -38,7 +38,7 @@ static int dccp_write_timeout(struct sock *sk)
38 38
39 if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) { 39 if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) {
40 if (icsk->icsk_retransmits != 0) 40 if (icsk->icsk_retransmits != 0)
41 dst_negative_advice(&sk->sk_dst_cache); 41 dst_negative_advice(&sk->sk_dst_cache, sk);
42 retry_until = icsk->icsk_syn_retries ? 42 retry_until = icsk->icsk_syn_retries ?
43 : sysctl_dccp_request_retries; 43 : sysctl_dccp_request_retries;
44 } else { 44 } else {
@@ -63,7 +63,7 @@ static int dccp_write_timeout(struct sock *sk)
63 Golden words :-). 63 Golden words :-).
64 */ 64 */
65 65
66 dst_negative_advice(&sk->sk_dst_cache); 66 dst_negative_advice(&sk->sk_dst_cache, sk);
67 } 67 }
68 68
69 retry_until = sysctl_dccp_retries2; 69 retry_until = sysctl_dccp_retries2;
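Editor's note: both timer.c call sites above now pass the socket to dst_negative_advice() along with the cached route pointer. The extra argument lets the helper also drop per-socket state that was derived from the old route, such as the cached transmit queue mapping, when the route is discarded. The sketch below only illustrates that call contract under those assumptions; it is not the kernel's actual helper.

#include <net/dst.h>
#include <net/sock.h>

/* negative_advice_sketch() mirrors the shape of the updated helper's
 * contract; names are illustrative. */
static inline void negative_advice_sketch(struct dst_entry **dst_p,
					  struct sock *sk)
{
	struct dst_entry *dst = *dst_p;

	if (dst != NULL && dst->ops->negative_advice != NULL) {
		*dst_p = dst->ops->negative_advice(dst);

		/* If the route was dropped or replaced, per-socket state
		 * tied to it (e.g. the cached tx queue) goes stale. */
		if (dst != *dst_p)
			sk_tx_queue_clear(sk);
	}
}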