author     Jeff Garzik <jgarzik@pobox.com>    2005-11-11 23:39:35 -0500
committer  Jeff Garzik <jgarzik@pobox.com>    2005-11-11 23:39:35 -0500
commit     f4256e301d9800b1e0276404cb01b3ac85b51067 (patch)
tree       975f56627b78f757608b31684311a24ca1478481 /net/sctp
parent     fb2a26b9f8f5eda6b96ba9753edf105e5999d6d9 (diff)
parent     cd52d1ee9a92587b242d946a2300a3245d3b885a (diff)
Merge branch 'master'
Diffstat (limited to 'net/sctp')
-rw-r--r--  net/sctp/associola.c      33
-rw-r--r--  net/sctp/endpointola.c    26
-rw-r--r--  net/sctp/input.c          20
-rw-r--r--  net/sctp/protocol.c        6
-rw-r--r--  net/sctp/sm_sideeffect.c   6
-rw-r--r--  net/sctp/sm_statefuns.c   22
-rw-r--r--  net/sctp/socket.c          5
-rw-r--r--  net/sctp/sysctl.c          8
-rw-r--r--  net/sctp/ulpevent.c       24
9 files changed, 82 insertions, 68 deletions
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 8c8ddf7f9b61..dec68a604773 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -128,9 +128,29 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 	 */
 	asoc->max_burst = sctp_max_burst;
 
-	/* Copy things from the endpoint. */
+	/* initialize association timers */
+	asoc->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0;
+	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
+	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
+	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;
+	asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0;
+	asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = 0;
+
+	/* sctpimpguide Section 2.12.2
+	 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
+	 * recommended value of 5 times 'RTO.Max'.
+	 */
+	asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
+		= 5 * asoc->rto_max;
+
+	asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
+	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
+		SCTP_DEFAULT_TIMEOUT_SACK;
+	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
+		sp->autoclose * HZ;
+
+	/* Initilizes the timers */
 	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
-		asoc->timeouts[i] = ep->timeouts[i];
 		init_timer(&asoc->timers[i]);
 		asoc->timers[i].function = sctp_timer_events[i];
 		asoc->timers[i].data = (unsigned long) asoc;
@@ -157,10 +177,10 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 	 * RFC 6 - A SCTP receiver MUST be able to receive a minimum of
 	 * 1500 bytes in one SCTP packet.
 	 */
-	if (sk->sk_rcvbuf < SCTP_DEFAULT_MINWINDOW)
+	if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
 		asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
 	else
-		asoc->rwnd = sk->sk_rcvbuf;
+		asoc->rwnd = sk->sk_rcvbuf/2;
 
 	asoc->a_rwnd = asoc->rwnd;
 
@@ -172,6 +192,9 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 	/* Set the sndbuf size for transmit. */
 	asoc->sndbuf_used = 0;
 
+	/* Initialize the receive memory counter */
+	atomic_set(&asoc->rmem_alloc, 0);
+
 	init_waitqueue_head(&asoc->wait);
 
 	asoc->c.my_vtag = sctp_generate_tag(ep);
@@ -380,6 +403,8 @@ static void sctp_association_destroy(struct sctp_association *asoc)
 		spin_unlock_bh(&sctp_assocs_id_lock);
 	}
 
+	BUG_TRAP(!atomic_read(&asoc->rmem_alloc));
+
 	if (asoc->base.malloced) {
 		kfree(asoc);
 		SCTP_DBG_OBJCNT_DEC(assoc);
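
The associola.c hunks above halve the advertised receive window relative to sk_rcvbuf and add a per-association rmem_alloc counter. A minimal stand-alone sketch (plain user-space C, not part of the patch; the halving presumably leaves headroom for struct sk_buff overhead) of how the initial rwnd now falls out of the socket receive buffer:

#include <stdio.h>

#define SCTP_DEFAULT_MINWINDOW 1500

/* Mirrors the new calculation above: advertise half the receive buffer,
 * but never less than the 1500-byte minimum window. */
static unsigned int initial_rwnd(unsigned int sk_rcvbuf)
{
	if (sk_rcvbuf / 2 < SCTP_DEFAULT_MINWINDOW)
		return SCTP_DEFAULT_MINWINDOW;
	return sk_rcvbuf / 2;
}

int main(void)
{
	printf("rcvbuf=65536 -> rwnd=%u\n", initial_rwnd(65536)); /* 32768 */
	printf("rcvbuf=2000  -> rwnd=%u\n", initial_rwnd(2000));  /* clamped to 1500 */
	return 0;
}

Run as written, a 64 KiB receive buffer yields a 32 KiB initial window, while a 2000-byte buffer is clamped up to the 1500-byte minimum.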
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 96984f7a2d69..67bd53070ee0 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -70,7 +70,6 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 						struct sock *sk,
 						gfp_t gfp)
 {
-	struct sctp_sock *sp = sctp_sk(sk);
 	memset(ep, 0, sizeof(struct sctp_endpoint));
 
 	/* Initialize the base structure. */
@@ -100,33 +99,14 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 	/* Create the lists of associations. */
 	INIT_LIST_HEAD(&ep->asocs);
 
-	/* Set up the base timeout information. */
-	ep->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0;
-	ep->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
-		msecs_to_jiffies(sp->rtoinfo.srto_initial);
-	ep->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
-		msecs_to_jiffies(sp->rtoinfo.srto_initial);
-	ep->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] =
-		msecs_to_jiffies(sp->rtoinfo.srto_initial);
-	ep->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0;
-	ep->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = 0;
-
-	/* sctpimpguide-05 Section 2.12.2
-	 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
-	 * recommended value of 5 times 'RTO.Max'.
-	 */
-	ep->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
-		= 5 * msecs_to_jiffies(sp->rtoinfo.srto_max);
-
-	ep->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
-	ep->timeouts[SCTP_EVENT_TIMEOUT_SACK] = sctp_sack_timeout;
-	ep->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
-
 	/* Use SCTP specific send buffer space queues. */
 	ep->sndbuf_policy = sctp_sndbuf_policy;
 	sk->sk_write_space = sctp_write_space;
 	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
 
+	/* Get the receive buffer policy for this endpoint */
+	ep->rcvbuf_policy = sctp_rcvbuf_policy;
+
 	/* Initialize the secret key used with cookie. */
 	get_random_bytes(&ep->secret_key[0], SCTP_SECRET_SIZE);
 	ep->last_key = ep->current_key = 0;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 28f32243397f..b24ff2c1aef5 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -100,21 +100,6 @@ static inline int sctp_rcv_checksum(struct sk_buff *skb)
 	return 0;
 }
 
-/* The free routine for skbuffs that sctp receives */
-static void sctp_rfree(struct sk_buff *skb)
-{
-	atomic_sub(sizeof(struct sctp_chunk),&skb->sk->sk_rmem_alloc);
-	sock_rfree(skb);
-}
-
-/* The ownership wrapper routine to do receive buffer accounting */
-static void sctp_rcv_set_owner_r(struct sk_buff *skb, struct sock *sk)
-{
-	skb_set_owner_r(skb,sk);
-	skb->destructor = sctp_rfree;
-	atomic_add(sizeof(struct sctp_chunk),&sk->sk_rmem_alloc);
-}
-
 struct sctp_input_cb {
 	union {
 		struct inet_skb_parm h4;
@@ -217,9 +202,6 @@ int sctp_rcv(struct sk_buff *skb)
 		rcvr = &ep->base;
 	}
 
-	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
-		goto discard_release;
-
 	/*
 	 * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
 	 * An SCTP packet is called an "out of the blue" (OOTB)
@@ -256,8 +238,6 @@ int sctp_rcv(struct sk_buff *skb)
 	}
 	SCTP_INPUT_CB(skb)->chunk = chunk;
 
-	sctp_rcv_set_owner_r(skb,sk);
-
 	/* Remember what endpoint is to handle this packet. */
 	chunk->rcvr = rcvr;
 
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 26de4d3e1bd9..f775d78aa59d 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -530,6 +530,9 @@ static void sctp_v4_get_saddr(struct sctp_association *asoc,
 {
 	struct rtable *rt = (struct rtable *)dst;
 
+	if (!asoc)
+		return;
+
 	if (rt) {
 		saddr->v4.sin_family = AF_INET;
 		saddr->v4.sin_port = asoc->base.bind_addr.port;
@@ -1047,6 +1050,9 @@ SCTP_STATIC __init int sctp_init(void)
 	/* Sendbuffer growth - do per-socket accounting */
 	sctp_sndbuf_policy = 0;
 
+	/* Rcvbuffer growth - do per-socket accounting */
+	sctp_rcvbuf_policy = 0;
+
 	/* HB.interval - 30 seconds */
 	sctp_hb_interval = SCTP_DEFAULT_TIMEOUT_HEARTBEAT;
 
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index f84173ea8ec1..823947170a33 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -385,7 +385,7 @@ sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
 	NULL,
 	sctp_generate_t4_rto_event,
 	sctp_generate_t5_shutdown_guard_event,
-	sctp_generate_heartbeat_event,
+	NULL,
 	sctp_generate_sack_event,
 	sctp_generate_autoclose_event,
 };
@@ -689,9 +689,9 @@ static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds,
 		 * increased due to timer expirations.
 		 */
 		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
-			asoc->ep->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT];
+			asoc->rto_initial;
 		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
-			asoc->ep->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE];
+			asoc->rto_initial;
 	}
 
 	if (sctp_state(asoc, ESTABLISHED) ||
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 505c7de10c50..475bfb4972d9 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -5160,6 +5160,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	sctp_verb_t deliver;
 	int tmp;
 	__u32 tsn;
+	int account_value;
+	struct sock *sk = asoc->base.sk;
 
 	data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
 	skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
@@ -5169,6 +5171,26 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 
 	/* ASSERT: Now skb->data is really the user data. */
 
+	/*
+	 * if we are established, and we have used up our receive
+	 * buffer memory, drop the frame
+	 */
+	if (asoc->state == SCTP_STATE_ESTABLISHED) {
+		/*
+		 * If the receive buffer policy is 1, then each
+		 * association can allocate up to sk_rcvbuf bytes
+		 * otherwise, all the associations in aggregate
+		 * may allocate up to sk_rcvbuf bytes
+		 */
+		if (asoc->ep->rcvbuf_policy)
+			account_value = atomic_read(&asoc->rmem_alloc);
+		else
+			account_value = atomic_read(&sk->sk_rmem_alloc);
+
+		if (account_value > sk->sk_rcvbuf)
+			return SCTP_IERROR_IGNORE_TSN;
+	}
+
 	/* Process ECN based congestion.
 	 *
 	 * Since the chunk structure is reused for all chunks within
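
The sctp_eat_data() hunk above makes the drop decision depend on the endpoint's rcvbuf_policy. A simplified stand-alone model (user-space C, not the kernel code; the buffer sizes are invented for illustration) of how the two policies differ when several associations share one socket:

#include <stdio.h>

/* policy 1 => per-association accounting, 0 => per-socket (aggregate) */
static int over_rcvbuf(int rcvbuf_policy, long asoc_rmem_alloc,
		       long sk_rmem_alloc, long sk_rcvbuf)
{
	long account_value = rcvbuf_policy ? asoc_rmem_alloc : sk_rmem_alloc;

	return account_value > sk_rcvbuf;	/* non-zero => ignore the TSN */
}

int main(void)
{
	/* Two associations on one socket with a 64 KiB receive buffer,
	 * each holding 40 KiB of queued events. */
	printf("per-socket policy drops: %d\n",
	       over_rcvbuf(0, 40960, 81920, 65536));	/* aggregate over the limit */
	printf("per-assoc  policy drops: %d\n",
	       over_rcvbuf(1, 40960, 81920, 65536));	/* this association still fits */
	return 0;
}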
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index b529af5e6f2a..abab81f3818f 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1932,7 +1932,6 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
 	if (copy_from_user(&sp->autoclose, optval, optlen))
 		return -EFAULT;
 
-	sp->ep->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
 	return 0;
 }
 
@@ -5115,8 +5114,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
 		event = sctp_skb2event(skb);
 		if (event->asoc == assoc) {
+			sock_rfree(skb);
 			__skb_unlink(skb, &oldsk->sk_receive_queue);
 			__skb_queue_tail(&newsk->sk_receive_queue, skb);
+			skb_set_owner_r(skb, newsk);
 		}
 	}
 
@@ -5144,8 +5145,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 		sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
 			event = sctp_skb2event(skb);
 			if (event->asoc == assoc) {
+				sock_rfree(skb);
 				__skb_unlink(skb, &oldsp->pd_lobby);
 				__skb_queue_tail(queue, skb);
+				skb_set_owner_r(skb, newsk);
 			}
 		}
 
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 75b28dd634fe..fcd7096c953d 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -121,6 +121,14 @@ static ctl_table sctp_table[] = {
 		.proc_handler = &proc_dointvec
 	},
 	{
+		.ctl_name = NET_SCTP_RCVBUF_POLICY,
+		.procname = "rcvbuf_policy",
+		.data = &sctp_rcvbuf_policy,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec
+	},
+	{
 		.ctl_name = NET_SCTP_PATH_MAX_RETRANS,
 		.procname = "path_max_retrans",
 		.data = &sctp_max_retrans_path,
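
The table entry above exposes the policy as a sysctl. A small user-space sketch (assuming the table is registered under net.sctp, as the neighbouring entries are, so the file shows up at /proc/sys/net/sctp/rcvbuf_policy) that switches to per-association accounting:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/sctp/rcvbuf_policy", "w");

	if (!f) {
		perror("rcvbuf_policy");	/* needs root and SCTP loaded */
		return 1;
	}
	fputs("1\n", f);	/* 1 = per-association, 0 = per-socket (default) */
	return fclose(f) ? 1 : 0;
}

The same effect can be had with "sysctl -w net.sctp.rcvbuf_policy=1".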
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index e049f41faa47..ba97f974f57c 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -52,19 +52,6 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
 						struct sctp_association *asoc);
 static void sctp_ulpevent_release_data(struct sctp_ulpevent *event);
 
-/* Stub skb destructor. */
-static void sctp_stub_rfree(struct sk_buff *skb)
-{
-/* WARNING: This function is just a warning not to use the
- * skb destructor. If the skb is shared, we may get the destructor
- * callback on some processor that does not own the sock_lock. This
- * was occuring with PACKET socket applications that were monitoring
- * our skbs. We can't take the sock_lock, because we can't risk
- * recursing if we do really own the sock lock. Instead, do all
- * of our rwnd manipulation while we own the sock_lock outright.
- */
-}
-
 /* Initialize an ULP event from an given skb. */
 SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event, int msg_flags)
 {
@@ -111,15 +98,19 @@ static inline void sctp_ulpevent_set_owner(struct sctp_ulpevent *event,
 	 */
 	sctp_association_hold((struct sctp_association *)asoc);
 	skb = sctp_event2skb(event);
-	skb->sk = asoc->base.sk;
 	event->asoc = (struct sctp_association *)asoc;
-	skb->destructor = sctp_stub_rfree;
+	atomic_add(skb->truesize, &event->asoc->rmem_alloc);
+	skb_set_owner_r(skb, asoc->base.sk);
 }
 
 /* A simple destructor to give up the reference to the association. */
 static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event)
 {
-	sctp_association_put(event->asoc);
+	struct sctp_association *asoc = event->asoc;
+	struct sk_buff *skb = sctp_event2skb(event);
+
+	atomic_sub(skb->truesize, &asoc->rmem_alloc);
+	sctp_association_put(asoc);
 }
 
 /* Create and initialize an SCTP_ASSOC_CHANGE event.
@@ -922,7 +913,6 @@ done:
 /* Free a ulpevent that has an owner. It includes releasing the reference
  * to the owner, updating the rwnd in case of a DATA event and freeing the
  * skb.
- * See comments in sctp_stub_rfree().
  */
 void sctp_ulpevent_free(struct sctp_ulpevent *event)
 {
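
Taken together, the ulpevent.c hunks charge skb->truesize to the owning association in sctp_ulpevent_set_owner() and release the same amount in sctp_ulpevent_release_owner(). A toy model (plain C, not kernel code) of the invariant that the BUG_TRAP() added to sctp_association_destroy() relies on:

#include <assert.h>
#include <stdio.h>

struct assoc_model {
	long rmem_alloc;	/* stands in for asoc->rmem_alloc */
};

static void set_owner(struct assoc_model *a, long truesize)
{
	a->rmem_alloc += truesize;	/* models atomic_add(skb->truesize, ...) */
}

static void release_owner(struct assoc_model *a, long truesize)
{
	a->rmem_alloc -= truesize;	/* models atomic_sub(skb->truesize, ...) */
}

int main(void)
{
	struct assoc_model a = { 0 };

	set_owner(&a, 2304);		/* DATA event queued to the socket */
	set_owner(&a, 768);		/* notification queued */
	release_owner(&a, 2304);	/* user consumed the DATA event */
	release_owner(&a, 768);		/* notification freed */

	assert(a.rmem_alloc == 0);	/* what BUG_TRAP() checks before destroy */
	printf("rmem_alloc balanced: %ld\n", a.rmem_alloc);
	return 0;
}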