path: root/net/dccp
author	Andrea Bittau <a.bittau@cs.ucl.ac.uk>	2006-09-19 16:13:37 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2006-09-22 18:19:40 -0400
commit	07978aabd52ce67f59971872c80f76d6e3ca18ae (patch)
tree	30b878674d4cf147cc6a0e91ff953298bc07b8e2 /net/dccp
parent	8d424f6ca2d02026dadff409770639d720375afb (diff)
[DCCP] CCID2: Allocate seq records on demand
Allocate more sequence state on demand. Each time CCID2 sends a packet, a record of it needs to be kept. This list of records grows proportionally to cwnd. Previously, the length of the list was hardcoded, so cwnd could only grow up to that fixed value (128). Now records are allocated on demand as needed, and cwnd may grow as it wishes.

The exceptional case where memory is not available is not handled gracefully; perhaps cwnd should be capped at that point.

Signed-off-by: Andrea Bittau <a.bittau@cs.ucl.ac.uk>
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
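For illustration only, here is a small user-space sketch of the scheme (not the kernel code): sequence records live in fixed-size chunks, each chunk is spliced into one circular doubly linked list, and a new chunk is allocated only when the next free slot ("head") is about to run into the oldest tracked record ("tail"). All names here (seq_record, seq_history, history_grow, history_record, CHUNK_LEN, CHUNK_MAX) are invented for the example; the corresponding kernel code is ccid2_hc_tx_alloc_seq() and ccid2_hc_tx_packet_sent() in the diff below.

/* Illustrative sketch, not the kernel implementation. */
#include <stdio.h>
#include <stdlib.h>

#define CHUNK_LEN 8   /* stands in for CCID2_SEQBUF_LEN */
#define CHUNK_MAX 4   /* stands in for CCID2_SEQBUF_MAX */

struct seq_record {
	unsigned long seq;
	struct seq_record *prev, *next;
};

struct seq_history {
	struct seq_record *bufs[CHUNK_MAX]; /* original pointers, kept for free() */
	int nbufs;
	struct seq_record *head;            /* next record to fill */
	struct seq_record *tail;            /* oldest record still tracked */
};

static int history_grow(struct seq_history *h)
{
	struct seq_record *chunk;
	int i;

	if (h->nbufs >= CHUNK_MAX)
		return -1; /* the kernel patch returns -ENOMEM here */

	chunk = malloc(sizeof(*chunk) * CHUNK_LEN);
	if (chunk == NULL)
		return -1;

	/* link the new chunk into a circle of its own */
	for (i = 0; i < CHUNK_LEN - 1; i++) {
		chunk[i].next = &chunk[i + 1];
		chunk[i + 1].prev = &chunk[i];
	}
	chunk[CHUNK_LEN - 1].next = chunk;
	chunk[0].prev = &chunk[CHUNK_LEN - 1];

	if (h->nbufs == 0) {
		h->head = h->tail = chunk;
	} else {
		/* splice the new circle in between head and tail */
		h->head->next = chunk;
		chunk[0].prev = h->head;
		h->tail->prev = &chunk[CHUNK_LEN - 1];
		chunk[CHUNK_LEN - 1].next = h->tail;
	}
	h->bufs[h->nbufs++] = chunk;
	return 0;
}

static int history_record(struct seq_history *h, unsigned long seq)
{
	h->head->seq = seq;
	/* grow before the head would run into the tail (tail would advance
	 * as packets are acked; acking is omitted in this sketch) */
	if (h->head->next == h->tail && history_grow(h) != 0)
		return -1;
	h->head = h->head->next;
	return 0;
}

int main(void)
{
	struct seq_history h = { .nbufs = 0 };
	unsigned long seq;

	if (history_grow(&h) != 0)
		return 1;
	for (seq = 0; seq < 20; seq++)      /* 20 > CHUNK_LEN forces growth */
		if (history_record(&h, seq) != 0)
			return 1;
	printf("chunks allocated: %d\n", h.nbufs);
	while (h.nbufs > 0)
		free(h.bufs[--h.nbufs]);
	return 0;
}

Run as-is, this prints "chunks allocated: 3" for 20 recorded packets with 8-record chunks, mirroring how the history now grows with cwnd instead of being capped at a fixed 128 records.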
Diffstat (limited to 'net/dccp')
-rw-r--r--	net/dccp/ccids/ccid2.c	96
-rw-r--r--	net/dccp/ccids/ccid2.h	6
2 files changed, 70 insertions(+), 32 deletions(-)
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index dbcda7e868b7..93a30ae8d07a 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -44,8 +44,6 @@ static int ccid2_debug;
 #define ccid2_pr_debug(format, a...)
 #endif
 
-static const int ccid2_seq_len = 128;
-
 #ifdef CONFIG_IP_DCCP_CCID2_DEBUG
 static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hctx)
 {
@@ -71,7 +69,6 @@ static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hctx)
 			BUG_ON(seqp->ccid2s_seq <= prev->ccid2s_seq);
 			BUG_ON(time_before(seqp->ccid2s_sent,
 					   prev->ccid2s_sent));
-			BUG_ON(len > ccid2_seq_len);
 
 			seqp = prev;
 		}
@@ -83,16 +80,57 @@ static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hctx)
 	do {
 		seqp = seqp->ccid2s_prev;
 		len++;
-		BUG_ON(len > ccid2_seq_len);
 	} while (seqp != hctx->ccid2hctx_seqh);
 
-	BUG_ON(len != ccid2_seq_len);
 	ccid2_pr_debug("total len=%d\n", len);
+	BUG_ON(len != hctx->ccid2hctx_seqbufc * CCID2_SEQBUF_LEN);
 }
 #else
 #define ccid2_hc_tx_check_sanity(hctx) do {} while (0)
 #endif
 
+static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx, int num,
+				 gfp_t gfp)
+{
+	struct ccid2_seq *seqp;
+	int i;
+
+	/* check if we have space to preserve the pointer to the buffer */
+	if (hctx->ccid2hctx_seqbufc >= (sizeof(hctx->ccid2hctx_seqbuf) /
+					sizeof(struct ccid2_seq*)))
+		return -ENOMEM;
+
+	/* allocate buffer and initialize linked list */
+	seqp = kmalloc(sizeof(*seqp) * num, gfp);
+	if (seqp == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < (num - 1); i++) {
+		seqp[i].ccid2s_next = &seqp[i + 1];
+		seqp[i + 1].ccid2s_prev = &seqp[i];
+	}
+	seqp[num - 1].ccid2s_next = seqp;
+	seqp->ccid2s_prev = &seqp[num - 1];
+
+	/* This is the first allocation. Initiate the head and tail. */
+	if (hctx->ccid2hctx_seqbufc == 0)
+		hctx->ccid2hctx_seqh = hctx->ccid2hctx_seqt = seqp;
+	else {
+		/* link the existing list with the one we just created */
+		hctx->ccid2hctx_seqh->ccid2s_next = seqp;
+		seqp->ccid2s_prev = hctx->ccid2hctx_seqh;
+
+		hctx->ccid2hctx_seqt->ccid2s_prev = &seqp[num - 1];
+		seqp[num - 1].ccid2s_next = hctx->ccid2hctx_seqt;
+	}
+
+	/* store the original pointer to the buffer so we can free it */
+	hctx->ccid2hctx_seqbuf[hctx->ccid2hctx_seqbufc] = seqp;
+	hctx->ccid2hctx_seqbufc++;
+
+	return 0;
+}
+
 static int ccid2_hc_tx_send_packet(struct sock *sk,
 				   struct sk_buff *skb, int len)
 {
@@ -231,6 +269,7 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, int len)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
+	struct ccid2_seq *next;
 	u64 seq;
 
 	ccid2_hc_tx_check_sanity(hctx);
@@ -250,15 +289,23 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, int len)
 	hctx->ccid2hctx_seqh->ccid2s_seq = seq;
 	hctx->ccid2hctx_seqh->ccid2s_acked = 0;
 	hctx->ccid2hctx_seqh->ccid2s_sent = jiffies;
-	hctx->ccid2hctx_seqh = hctx->ccid2hctx_seqh->ccid2s_next;
 
-	ccid2_pr_debug("cwnd=%d pipe=%d\n", hctx->ccid2hctx_cwnd,
-		       hctx->ccid2hctx_pipe);
+	next = hctx->ccid2hctx_seqh->ccid2s_next;
+	/* check if we need to alloc more space */
+	if (next == hctx->ccid2hctx_seqt) {
+		int rc;
 
-	if (hctx->ccid2hctx_seqh == hctx->ccid2hctx_seqt) {
-		/* XXX allocate more space */
-		WARN_ON(1);
+		ccid2_pr_debug("allocating more space in history\n");
+		rc = ccid2_hc_tx_alloc_seq(hctx, CCID2_SEQBUF_LEN, GFP_KERNEL);
+		BUG_ON(rc); /* XXX what do we do? */
+
+		next = hctx->ccid2hctx_seqh->ccid2s_next;
+		BUG_ON(next == hctx->ccid2hctx_seqt);
 	}
+	hctx->ccid2hctx_seqh = next;
+
+	ccid2_pr_debug("cwnd=%d pipe=%d\n", hctx->ccid2hctx_cwnd,
+		       hctx->ccid2hctx_pipe);
 
 	hctx->ccid2hctx_sent++;
 
@@ -674,8 +721,6 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
 {
 	struct ccid2_hc_tx_sock *hctx = ccid_priv(ccid);
-	int seqcount = ccid2_seq_len;
-	int i;
 
 	hctx->ccid2hctx_cwnd = 1;
 	/* Initialize ssthresh to infinity. This means that we will exit the
@@ -684,26 +729,12 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
 	 */
 	hctx->ccid2hctx_ssthresh = ~0;
 	hctx->ccid2hctx_numdupack = 3;
+	hctx->ccid2hctx_seqbufc = 0;
 
 	/* XXX init ~ to window size... */
-	hctx->ccid2hctx_seqbuf = kmalloc(sizeof(*hctx->ccid2hctx_seqbuf) *
-					 seqcount, gfp_any());
-	if (hctx->ccid2hctx_seqbuf == NULL)
+	if (ccid2_hc_tx_alloc_seq(hctx, CCID2_SEQBUF_LEN, GFP_ATOMIC) != 0)
 		return -ENOMEM;
 
-	for (i = 0; i < (seqcount - 1); i++) {
-		hctx->ccid2hctx_seqbuf[i].ccid2s_next =
-					&hctx->ccid2hctx_seqbuf[i + 1];
-		hctx->ccid2hctx_seqbuf[i + 1].ccid2s_prev =
-					&hctx->ccid2hctx_seqbuf[i];
-	}
-	hctx->ccid2hctx_seqbuf[seqcount - 1].ccid2s_next =
-					hctx->ccid2hctx_seqbuf;
-	hctx->ccid2hctx_seqbuf->ccid2s_prev =
-					&hctx->ccid2hctx_seqbuf[seqcount - 1];
-
-	hctx->ccid2hctx_seqh = hctx->ccid2hctx_seqbuf;
-	hctx->ccid2hctx_seqt = hctx->ccid2hctx_seqh;
 	hctx->ccid2hctx_sent = 0;
 	hctx->ccid2hctx_rto = 3 * HZ;
 	hctx->ccid2hctx_srtt = -1;
@@ -722,10 +753,13 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
 static void ccid2_hc_tx_exit(struct sock *sk)
 {
 	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
+	int i;
 
 	ccid2_hc_tx_kill_rto_timer(sk);
-	kfree(hctx->ccid2hctx_seqbuf);
-	hctx->ccid2hctx_seqbuf = NULL;
+
+	for (i = 0; i < hctx->ccid2hctx_seqbufc; i++)
+		kfree(hctx->ccid2hctx_seqbuf[i]);
+	hctx->ccid2hctx_seqbufc = 0;
 }
 
 static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
index b4cc6c0bf020..2a02ce04ba85 100644
--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h
@@ -35,6 +35,9 @@ struct ccid2_seq {
 	struct ccid2_seq	*ccid2s_next;
 };
 
+#define CCID2_SEQBUF_LEN 256
+#define CCID2_SEQBUF_MAX 128
+
 /** struct ccid2_hc_tx_sock - CCID2 TX half connection
  *
  * @ccid2hctx_ssacks - ACKs recv in slow start
@@ -53,7 +56,8 @@ struct ccid2_hc_tx_sock {
 	unsigned int		ccid2hctx_ssthresh;
 	int			ccid2hctx_pipe;
 	int			ccid2hctx_numdupack;
-	struct ccid2_seq	*ccid2hctx_seqbuf;
+	struct ccid2_seq	*ccid2hctx_seqbuf[CCID2_SEQBUF_MAX];
+	int			ccid2hctx_seqbufc;
 	struct ccid2_seq	*ccid2hctx_seqh;
 	struct ccid2_seq	*ccid2hctx_seqt;
 	long			ccid2hctx_rto;