Diffstat (limited to 'net/sctp')
-rw-r--r--   net/sctp/associola.c      |  21
-rw-r--r--   net/sctp/bind_addr.c      |  70
-rw-r--r--   net/sctp/endpointola.c    |  27
-rw-r--r--   net/sctp/input.c          |   8
-rw-r--r--   net/sctp/inqueue.c        |   8
-rw-r--r--   net/sctp/ipv6.c           |  46
-rw-r--r--   net/sctp/outqueue.c       |   7
-rw-r--r--   net/sctp/protocol.c       |  79
-rw-r--r--   net/sctp/sm_make_chunk.c  | 176
-rw-r--r--   net/sctp/sm_sideeffect.c  |   8
-rw-r--r--   net/sctp/sm_statefuns.c   | 294
-rw-r--r--   net/sctp/sm_statetable.c  |  16
-rw-r--r--   net/sctp/socket.c         | 137
-rw-r--r--   net/sctp/ulpqueue.c       |  75
14 files changed, 653 insertions, 319 deletions
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 498edb0cd4e5..9bad8ba0feda 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -99,7 +99,6 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 
     /* Initialize the bind addr area. */
     sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);
-    rwlock_init(&asoc->base.addr_lock);
 
     asoc->state = SCTP_STATE_CLOSED;
 
@@ -727,7 +726,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
         break;
 
     case SCTP_TRANSPORT_DOWN:
-        transport->state = SCTP_INACTIVE;
+        /* If the transport was never confirmed, do not transition it
+         * to inactive state.
+         */
+        if (transport->state != SCTP_UNCONFIRMED)
+            transport->state = SCTP_INACTIVE;
+
         spc_state = SCTP_ADDR_UNREACHABLE;
         break;
 
@@ -932,8 +936,6 @@ struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
 {
     struct sctp_transport *transport;
 
-    sctp_read_lock(&asoc->base.addr_lock);
-
     if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
         (htons(asoc->peer.port) == paddr->v4.sin_port)) {
         transport = sctp_assoc_lookup_paddr(asoc, paddr);
@@ -947,7 +949,6 @@ struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
     transport = NULL;
 
 out:
-    sctp_read_unlock(&asoc->base.addr_lock);
     return transport;
 }
 
@@ -1371,19 +1372,13 @@ int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
 int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
                 const union sctp_addr *laddr)
 {
-    int found;
+    int found = 0;
 
-    sctp_read_lock(&asoc->base.addr_lock);
     if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
         sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
-                 sctp_sk(asoc->base.sk))) {
+                 sctp_sk(asoc->base.sk)))
         found = 1;
-        goto out;
-    }
 
-    found = 0;
-out:
-    sctp_read_unlock(&asoc->base.addr_lock);
     return found;
 }
 
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index fdb287a9e2e2..dfffa94fb9f6 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -163,9 +163,15 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
     addr->a.v4.sin_port = htons(bp->port);
 
     addr->use_as_src = use_as_src;
+    addr->valid = 1;
 
     INIT_LIST_HEAD(&addr->list);
-    list_add_tail(&addr->list, &bp->address_list);
+    INIT_RCU_HEAD(&addr->rcu);
+
+    /* We always hold a socket lock when calling this function,
+     * and that acts as a writer synchronizing lock.
+     */
+    list_add_tail_rcu(&addr->list, &bp->address_list);
     SCTP_DBG_OBJCNT_INC(addr);
 
     return 0;
@@ -174,23 +180,35 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
 /* Delete an address from the bind address list in the SCTP_bind_addr
  * structure.
  */
-int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr)
+int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr,
+            void fastcall (*rcu_call)(struct rcu_head *head,
+                          void (*func)(struct rcu_head *head)))
 {
-    struct list_head *pos, *temp;
-    struct sctp_sockaddr_entry *addr;
+    struct sctp_sockaddr_entry *addr, *temp;
 
-    list_for_each_safe(pos, temp, &bp->address_list) {
-        addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+    /* We hold the socket lock when calling this function,
+     * and that acts as a writer synchronizing lock.
+     */
+    list_for_each_entry_safe(addr, temp, &bp->address_list, list) {
         if (sctp_cmp_addr_exact(&addr->a, del_addr)) {
             /* Found the exact match. */
-            list_del(pos);
-            kfree(addr);
-            SCTP_DBG_OBJCNT_DEC(addr);
-
-            return 0;
+            addr->valid = 0;
+            list_del_rcu(&addr->list);
+            break;
         }
     }
 
+    /* Call the rcu callback provided in the args. This function is
+     * called by both BH packet processing and user side socket option
+     * processing, but it works on different lists in those 2 contexts.
+     * Each context provides its own callback, whether call_rcu_bh()
+     * or call_rcu(), to make sure that we wait for an appropriate time.
+     */
+    if (addr && !addr->valid) {
+        rcu_call(&addr->rcu, sctp_local_addr_free);
+        SCTP_DBG_OBJCNT_DEC(addr);
+    }
+
     return -EINVAL;
 }
 
@@ -300,15 +318,20 @@ int sctp_bind_addr_match(struct sctp_bind_addr *bp,
               struct sctp_sock *opt)
 {
     struct sctp_sockaddr_entry *laddr;
-    struct list_head *pos;
+    int match = 0;
 
-    list_for_each(pos, &bp->address_list) {
-        laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
-        if (opt->pf->cmp_addr(&laddr->a, addr, opt))
-            return 1;
+    rcu_read_lock();
+    list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+        if (!laddr->valid)
+            continue;
+        if (opt->pf->cmp_addr(&laddr->a, addr, opt)) {
+            match = 1;
+            break;
+        }
     }
+    rcu_read_unlock();
 
-    return 0;
+    return match;
 }
 
 /* Find the first address in the bind address list that is not present in
@@ -323,18 +346,19 @@ union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp,
     union sctp_addr *addr;
     void *addr_buf;
     struct sctp_af *af;
-    struct list_head *pos;
     int i;
 
-    list_for_each(pos, &bp->address_list) {
-        laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
-
+    /* This is only called by sctp_send_asconf_del_ip() and we hold
+     * the socket lock in that code path, so that address list
+     * can't change.
+     */
+    list_for_each_entry(laddr, &bp->address_list, list) {
         addr_buf = (union sctp_addr *)addrs;
         for (i = 0; i < addrcnt; i++) {
             addr = (union sctp_addr *)addr_buf;
             af = sctp_get_af_specific(addr->v4.sin_family);
             if (!af)
-                return NULL;
+                break;
 
             if (opt->pf->cmp_addr(&laddr->a, addr, opt))
                 break;
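The bind address list above now follows the usual RCU discipline: writers (which already hold the socket lock) clear the entry's valid flag, unlink it with list_del_rcu(), and defer the kfree() until readers are done. A minimal, self-contained sketch of that writer side, using hypothetical my_entry/my_list/my_lock names rather than the SCTP structures:

    #include <linux/list.h>
    #include <linux/rculist.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct my_entry {
            int key;                        /* payload of the entry */
            int valid;                      /* cleared before unlinking */
            struct list_head list;
            struct rcu_head rcu;
    };

    static LIST_HEAD(my_list);
    static DEFINE_SPINLOCK(my_lock);        /* writer-side serialization */

    static void my_entry_free(struct rcu_head *head)
    {
            /* Runs only after all pre-existing RCU readers have finished. */
            kfree(container_of(head, struct my_entry, rcu));
    }

    static void my_entry_del(struct my_entry *e)
    {
            spin_lock_bh(&my_lock);
            e->valid = 0;                   /* readers start skipping it */
            list_del_rcu(&e->list);         /* readers may still be walking it */
            spin_unlock_bh(&my_lock);
            call_rcu(&e->rcu, my_entry_free);
    }

sctp_del_bind_addr() takes the RCU callback as a parameter instead of calling call_rcu() directly, because the BH packet-processing path has to use call_rcu_bh() while the socket-option path uses call_rcu().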
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 1404a9e2e78f..8f485a0d14bd 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -92,7 +92,6 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 
     /* Initialize the bind addr area */
     sctp_bind_addr_init(&ep->base.bind_addr, 0);
-    rwlock_init(&ep->base.addr_lock);
 
     /* Remember who we are attached to. */
     ep->base.sk = sk;
@@ -225,21 +224,14 @@ void sctp_endpoint_put(struct sctp_endpoint *ep)
 struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep,
                          const union sctp_addr *laddr)
 {
-    struct sctp_endpoint *retval;
+    struct sctp_endpoint *retval = NULL;
 
-    sctp_read_lock(&ep->base.addr_lock);
     if (htons(ep->base.bind_addr.port) == laddr->v4.sin_port) {
         if (sctp_bind_addr_match(&ep->base.bind_addr, laddr,
-                     sctp_sk(ep->base.sk))) {
+                     sctp_sk(ep->base.sk)))
             retval = ep;
-            goto out;
-        }
     }
 
-    retval = NULL;
-
-out:
-    sctp_read_unlock(&ep->base.addr_lock);
     return retval;
 }
 
@@ -261,9 +253,7 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
     list_for_each(pos, &ep->asocs) {
         asoc = list_entry(pos, struct sctp_association, asocs);
         if (rport == asoc->peer.port) {
-            sctp_read_lock(&asoc->base.addr_lock);
             *transport = sctp_assoc_lookup_paddr(asoc, paddr);
-            sctp_read_unlock(&asoc->base.addr_lock);
 
             if (*transport)
                 return asoc;
@@ -295,20 +285,17 @@ struct sctp_association *sctp_endpoint_lookup_assoc(
 int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
                 const union sctp_addr *paddr)
 {
-    struct list_head *pos;
     struct sctp_sockaddr_entry *addr;
     struct sctp_bind_addr *bp;
 
-    sctp_read_lock(&ep->base.addr_lock);
     bp = &ep->base.bind_addr;
-    list_for_each(pos, &bp->address_list) {
-        addr = list_entry(pos, struct sctp_sockaddr_entry, list);
-        if (sctp_has_association(&addr->a, paddr)) {
-            sctp_read_unlock(&ep->base.addr_lock);
+    /* This function is called with the socket lock held,
+     * so the address_list can not change.
+     */
+    list_for_each_entry(addr, &bp->address_list, list) {
+        if (sctp_has_association(&addr->a, paddr))
             return 1;
-        }
     }
-    sctp_read_unlock(&ep->base.addr_lock);
 
     return 0;
 }
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 47e56017f4ce..f9a0c9276e3b 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -622,6 +622,14 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
     if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type)
         goto discard;
 
+    /* RFC 4460, 2.11.2
+     * This will discard packets with INIT chunk bundled as
+     * subsequent chunks in the packet. When INIT is first,
+     * the normal INIT processing will discard the chunk.
+     */
+    if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
+        goto discard;
+
     /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR
      * or a COOKIE ACK the SCTP Packet should be silently
      * discarded.
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 88aa22407549..e4ea7fdf36ed 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -130,6 +130,14 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
             /* Force chunk->skb->data to chunk->chunk_end. */
             skb_pull(chunk->skb,
                  chunk->chunk_end - chunk->skb->data);
+
+            /* Verify that we have at least chunk headers
+             * worth of buffer left.
+             */
+            if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) {
+                sctp_chunk_free(chunk);
+                chunk = queue->in_progress = NULL;
+            }
         }
     }
 
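The new check above makes sure a full chunk header is still present before the next iteration parses it. The same guard in a generic, userspace form (hypothetical tlv_hdr type, not the kernel's sctp_chunkhdr_t):

    #include <stddef.h>
    #include <stdint.h>

    struct tlv_hdr {                /* hypothetical 4-byte header */
            uint8_t  type;
            uint8_t  flags;
            uint16_t length;        /* network byte order on the wire */
    };

    /* Return the next header only if one fully fits in [pos, end);
     * otherwise stop parsing instead of reading a truncated header.
     */
    static const struct tlv_hdr *next_hdr(const uint8_t *pos,
                                          const uint8_t *end)
    {
            if (pos > end || (size_t)(end - pos) < sizeof(struct tlv_hdr))
                    return NULL;
            return (const struct tlv_hdr *)pos;
    }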
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index f8aa23dda1c1..670fd2740b89 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -77,13 +77,18 @@
 
 #include <asm/uaccess.h>
 
-/* Event handler for inet6 address addition/deletion events. */
+/* Event handler for inet6 address addition/deletion events.
+ * The sctp_local_addr_list needs to be protected by a spin lock since
+ * multiple notifiers (say IPv4 and IPv6) may be running at the same
+ * time and thus corrupt the list.
+ * The reader side is protected with RCU.
+ */
 static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
                 void *ptr)
 {
     struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
-    struct sctp_sockaddr_entry *addr;
-    struct list_head *pos, *temp;
+    struct sctp_sockaddr_entry *addr = NULL;
+    struct sctp_sockaddr_entry *temp;
 
     switch (ev) {
     case NETDEV_UP:
@@ -94,19 +99,26 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
             memcpy(&addr->a.v6.sin6_addr, &ifa->addr,
                    sizeof(struct in6_addr));
             addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
-            list_add_tail(&addr->list, &sctp_local_addr_list);
+            addr->valid = 1;
+            spin_lock_bh(&sctp_local_addr_lock);
+            list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
+            spin_unlock_bh(&sctp_local_addr_lock);
         }
         break;
     case NETDEV_DOWN:
-        list_for_each_safe(pos, temp, &sctp_local_addr_list) {
-            addr = list_entry(pos, struct sctp_sockaddr_entry, list);
-            if (ipv6_addr_equal(&addr->a.v6.sin6_addr, &ifa->addr)) {
-                list_del(pos);
-                kfree(addr);
+        spin_lock_bh(&sctp_local_addr_lock);
+        list_for_each_entry_safe(addr, temp,
+                     &sctp_local_addr_list, list) {
+            if (ipv6_addr_equal(&addr->a.v6.sin6_addr,
+                        &ifa->addr)) {
+                addr->valid = 0;
+                list_del_rcu(&addr->list);
                 break;
             }
         }
-
+        spin_unlock_bh(&sctp_local_addr_lock);
+        if (addr && !addr->valid)
+            call_rcu(&addr->rcu, sctp_local_addr_free);
         break;
     }
 
@@ -290,9 +302,7 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
                   union sctp_addr *saddr)
 {
     struct sctp_bind_addr *bp;
-    rwlock_t *addr_lock;
     struct sctp_sockaddr_entry *laddr;
-    struct list_head *pos;
     sctp_scope_t scope;
     union sctp_addr *baddr = NULL;
     __u8 matchlen = 0;
@@ -312,14 +322,14 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
     scope = sctp_scope(daddr);
 
     bp = &asoc->base.bind_addr;
-    addr_lock = &asoc->base.addr_lock;
 
     /* Go through the bind address list and find the best source address
      * that matches the scope of the destination address.
      */
-    sctp_read_lock(addr_lock);
-    list_for_each(pos, &bp->address_list) {
-        laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
+    rcu_read_lock();
+    list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+        if (!laddr->valid)
+            continue;
         if ((laddr->use_as_src) &&
             (laddr->a.sa.sa_family == AF_INET6) &&
             (scope <= sctp_scope(&laddr->a))) {
@@ -341,7 +351,7 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
                __FUNCTION__, asoc, NIP6(daddr->v6.sin6_addr));
     }
 
-    sctp_read_unlock(addr_lock);
+    rcu_read_unlock();
 }
 
 /* Make a copy of all potential local addresses. */
@@ -367,7 +377,9 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
         addr->a.v6.sin6_port = 0;
         addr->a.v6.sin6_addr = ifp->addr;
         addr->a.v6.sin6_scope_id = dev->ifindex;
+        addr->valid = 1;
         INIT_LIST_HEAD(&addr->list);
+        INIT_RCU_HEAD(&addr->rcu);
         list_add_tail(&addr->list, addrlist);
     }
 }
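The new comment block in this file spells out the split: the notifiers are the writers and serialize on sctp_local_addr_lock, while lookups only enter an RCU read-side section and skip entries whose valid flag was cleared by a concurrent delete. A reader-side sketch, reusing the hypothetical my_entry/my_list definitions from the sketch after the bind_addr.c hunks:

    static int my_list_contains(int key)
    {
            struct my_entry *e;
            int match = 0;

            rcu_read_lock();
            list_for_each_entry_rcu(e, &my_list, list) {
                    if (!e->valid)          /* concurrently deleted entry */
                            continue;
                    if (e->key == key) {
                            match = 1;
                            break;
                    }
            }
            rcu_read_unlock();
            return match;
    }

Returning a plain yes/no result (as sctp_bind_addr_match() now does) avoids handing out a pointer that could be freed as soon as rcu_read_unlock() runs.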
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 992f361084b7..28f4fe77ceee 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -421,6 +421,13 @@ void sctp_retransmit_mark(struct sctp_outq *q,
          */
         if ((fast_retransmit && (chunk->fast_retransmit > 0)) ||
             (!fast_retransmit && !chunk->tsn_gap_acked)) {
+            /* If this chunk was sent less than 1 rto ago, do not
+             * retransmit this chunk, but give the peer time
+             * to acknowledge it.
+             */
+            if ((jiffies - chunk->sent_at) < transport->rto)
+                continue;
+
             /* RFC 2960 6.2.1 Processing a Received SACK
              *
              * C) Any time a DATA chunk is marked for
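The retransmission change above compares a jiffies delta against the transport RTO. An equivalent, arguably more idiomatic spelling uses the jiffies comparison helpers; a small sketch assuming sent_at and rto are in jiffies, as in the SCTP structures:

    #include <linux/jiffies.h>

    /* True while less than one RTO has elapsed since the chunk was sent,
     * i.e. the case in which the patch skips the retransmission.
     */
    static inline int sent_within_one_rto(unsigned long sent_at,
                                          unsigned long rto)
    {
            return time_before(jiffies, sent_at + rto);
    }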
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index e98579b788b8..3d036cdfae41 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -153,6 +153,9 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
         addr->a.v4.sin_family = AF_INET;
         addr->a.v4.sin_port = 0;
         addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
+        addr->valid = 1;
+        INIT_LIST_HEAD(&addr->list);
+        INIT_RCU_HEAD(&addr->rcu);
         list_add_tail(&addr->list, addrlist);
     }
 }
@@ -192,16 +195,24 @@ static void sctp_free_local_addr_list(void)
     }
 }
 
+void sctp_local_addr_free(struct rcu_head *head)
+{
+    struct sctp_sockaddr_entry *e = container_of(head,
+                struct sctp_sockaddr_entry, rcu);
+    kfree(e);
+}
+
 /* Copy the local addresses which are valid for 'scope' into 'bp'. */
 int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
                   gfp_t gfp, int copy_flags)
 {
     struct sctp_sockaddr_entry *addr;
     int error = 0;
-    struct list_head *pos, *temp;
 
-    list_for_each_safe(pos, temp, &sctp_local_addr_list) {
-        addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+    rcu_read_lock();
+    list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) {
+        if (!addr->valid)
+            continue;
         if (sctp_in_scope(&addr->a, scope)) {
             /* Now that the address is in scope, check to see if
              * the address type is really supported by the local
@@ -213,7 +224,7 @@ int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
                 (copy_flags & SCTP_ADDR6_ALLOWED) &&
                 (copy_flags & SCTP_ADDR6_PEERSUPP)))) {
                 error = sctp_add_bind_addr(bp, &addr->a, 1,
-                               GFP_ATOMIC);
+                              GFP_ATOMIC);
                 if (error)
                     goto end_copy;
             }
@@ -221,6 +232,7 @@ int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
     }
 
 end_copy:
+    rcu_read_unlock();
     return error;
 }
 
@@ -416,9 +428,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
     struct rtable *rt;
     struct flowi fl;
     struct sctp_bind_addr *bp;
-    rwlock_t *addr_lock;
     struct sctp_sockaddr_entry *laddr;
-    struct list_head *pos;
     struct dst_entry *dst = NULL;
     union sctp_addr dst_saddr;
 
@@ -447,23 +457,20 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
         goto out;
 
     bp = &asoc->base.bind_addr;
-    addr_lock = &asoc->base.addr_lock;
 
     if (dst) {
         /* Walk through the bind address list and look for a bind
          * address that matches the source address of the returned dst.
          */
-        sctp_read_lock(addr_lock);
-        list_for_each(pos, &bp->address_list) {
-            laddr = list_entry(pos, struct sctp_sockaddr_entry,
-                       list);
-            if (!laddr->use_as_src)
+        rcu_read_lock();
+        list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+            if (!laddr->valid || !laddr->use_as_src)
                 continue;
             sctp_v4_dst_saddr(&dst_saddr, dst, htons(bp->port));
             if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a))
                 goto out_unlock;
         }
-        sctp_read_unlock(addr_lock);
+        rcu_read_unlock();
 
         /* None of the bound addresses match the source address of the
          * dst. So release it.
@@ -475,10 +482,10 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
     /* Walk through the bind address list and try to get a dst that
      * matches a bind address as the source address.
      */
-    sctp_read_lock(addr_lock);
-    list_for_each(pos, &bp->address_list) {
-        laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
-
+    rcu_read_lock();
+    list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+        if (!laddr->valid)
+            continue;
         if ((laddr->use_as_src) &&
             (AF_INET == laddr->a.sa.sa_family)) {
             fl.fl4_src = laddr->a.v4.sin_addr.s_addr;
@@ -490,7 +497,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
     }
 
 out_unlock:
-    sctp_read_unlock(addr_lock);
+    rcu_read_unlock();
 out:
     if (dst)
         SCTP_DEBUG_PRINTK("rt_dst:%u.%u.%u.%u, rt_src:%u.%u.%u.%u\n",
@@ -600,13 +607,18 @@ static void sctp_v4_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr)
     seq_printf(seq, "%d.%d.%d.%d ", NIPQUAD(addr->v4.sin_addr));
 }
 
-/* Event handler for inet address addition/deletion events. */
+/* Event handler for inet address addition/deletion events.
+ * The sctp_local_addr_list needs to be protected by a spin lock since
+ * multiple notifiers (say IPv4 and IPv6) may be running at the same
+ * time and thus corrupt the list.
+ * The reader side is protected with RCU.
+ */
 static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
                    void *ptr)
 {
     struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
-    struct sctp_sockaddr_entry *addr;
-    struct list_head *pos, *temp;
+    struct sctp_sockaddr_entry *addr = NULL;
+    struct sctp_sockaddr_entry *temp;
 
     switch (ev) {
     case NETDEV_UP:
@@ -615,19 +627,25 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
             addr->a.v4.sin_family = AF_INET;
             addr->a.v4.sin_port = 0;
             addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
-            list_add_tail(&addr->list, &sctp_local_addr_list);
+            addr->valid = 1;
+            spin_lock_bh(&sctp_local_addr_lock);
+            list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
+            spin_unlock_bh(&sctp_local_addr_lock);
         }
         break;
     case NETDEV_DOWN:
-        list_for_each_safe(pos, temp, &sctp_local_addr_list) {
-            addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+        spin_lock_bh(&sctp_local_addr_lock);
+        list_for_each_entry_safe(addr, temp,
+                     &sctp_local_addr_list, list) {
             if (addr->a.v4.sin_addr.s_addr == ifa->ifa_local) {
-                list_del(pos);
-                kfree(addr);
+                addr->valid = 0;
+                list_del_rcu(&addr->list);
                 break;
             }
         }
-
+        spin_unlock_bh(&sctp_local_addr_lock);
+        if (addr && !addr->valid)
+            call_rcu(&addr->rcu, sctp_local_addr_free);
         break;
     }
 
@@ -1160,6 +1178,7 @@ SCTP_STATIC __init int sctp_init(void)
 
     /* Initialize the local address list. */
     INIT_LIST_HEAD(&sctp_local_addr_list);
+    spin_lock_init(&sctp_local_addr_lock);
     sctp_get_local_addr_list();
 
     /* Register notifier for inet address additions/deletions. */
@@ -1227,6 +1246,9 @@ SCTP_STATIC __exit void sctp_exit(void)
     sctp_v6_del_protocol();
     inet_del_protocol(&sctp_protocol, IPPROTO_SCTP);
 
+    /* Unregister notifier for inet address additions/deletions. */
+    unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
+
     /* Free the local address list. */
     sctp_free_local_addr_list();
 
@@ -1240,9 +1262,6 @@ SCTP_STATIC __exit void sctp_exit(void)
     inet_unregister_protosw(&sctp_stream_protosw);
     inet_unregister_protosw(&sctp_seqpacket_protosw);
 
-    /* Unregister notifier for inet address additions/deletions. */
-    unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
-
     sctp_sysctl_unregister();
     list_del(&sctp_ipv4_specific.list);
 
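Note the ordering change in sctp_exit() above: the inet address notifier is now unregistered before the local address list is freed, so no callback can run against a list that is being torn down. A schematic cleanup path showing the same ordering (my_notifier and my_free_list are placeholders, not the real symbols):

    #include <linux/inetdevice.h>
    #include <linux/notifier.h>

    static struct notifier_block my_notifier;   /* registered at init time */

    static void my_free_list(void);             /* frees the list the notifier updates */

    static void my_proto_exit(void)
    {
            /* 1. After this returns, no notifier callback can touch the list. */
            unregister_inetaddr_notifier(&my_notifier);

            /* 2. Only now is it safe to drain and free the list. */
            my_free_list();
    }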
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 51c4d7fef1d2..23ae37ec8711 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -110,7 +110,7 @@ static const struct sctp_paramhdr prsctp_param = {
  * abort chunk.
  */
 void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
-             const void *payload, size_t paylen)
+             size_t paylen)
 {
     sctp_errhdr_t err;
     __u16 len;
@@ -120,7 +120,6 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
     len = sizeof(sctp_errhdr_t) + paylen;
     err.length = htons(len);
     chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
-    sctp_addto_chunk(chunk, paylen, payload);
 }
 
 /* 3.3.2 Initiation (INIT) (1)
@@ -780,8 +779,8 @@ struct sctp_chunk *sctp_make_abort_no_data(
 
     /* Put the tsn back into network byte order. */
     payload = htonl(tsn);
-    sctp_init_cause(retval, SCTP_ERROR_NO_DATA, (const void *)&payload,
-            sizeof(payload));
+    sctp_init_cause(retval, SCTP_ERROR_NO_DATA, sizeof(payload));
+    sctp_addto_chunk(retval, sizeof(payload), (const void *)&payload);
 
     /* RFC 2960 6.4 Multi-homed SCTP Endpoints
      *
@@ -823,7 +822,8 @@ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
         goto err_copy;
     }
 
-    sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen);
+    sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, paylen);
+    sctp_addto_chunk(retval, paylen, payload);
 
     if (paylen)
         kfree(payload);
@@ -850,15 +850,17 @@ struct sctp_chunk *sctp_make_abort_violation(
     struct sctp_paramhdr phdr;
 
     retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen
-                      + sizeof(sctp_chunkhdr_t));
+                      + sizeof(sctp_paramhdr_t));
     if (!retval)
         goto end;
 
-    sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, payload, paylen);
+    sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, paylen
+                    + sizeof(sctp_paramhdr_t));
 
     phdr.type = htons(chunk->chunk_hdr->type);
     phdr.length = chunk->chunk_hdr->length;
-    sctp_addto_chunk(retval, sizeof(sctp_paramhdr_t), &phdr);
+    sctp_addto_chunk(retval, paylen, payload);
+    sctp_addto_param(retval, sizeof(sctp_paramhdr_t), &phdr);
 
 end:
     return retval;
@@ -955,7 +957,8 @@ struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
     if (!retval)
         goto nodata;
 
-    sctp_init_cause(retval, cause_code, payload, paylen);
+    sctp_init_cause(retval, cause_code, paylen);
+    sctp_addto_chunk(retval, paylen, payload);
 
 nodata:
     return retval;
@@ -1128,7 +1131,7 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
     void *target;
     void *padding;
     int chunklen = ntohs(chunk->chunk_hdr->length);
-    int padlen = chunklen % 4;
+    int padlen = WORD_ROUND(chunklen) - chunklen;
 
     padding = skb_put(chunk->skb, padlen);
     target = skb_put(chunk->skb, len);
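The padlen change above is worth spelling out: chunklen % 4 is the remainder past the last 4-byte boundary, not the number of pad bytes needed to reach the next one, so the old formula under-padded odd-remainder lengths. A small standalone check, with WORD_ROUND assumed to round up to a multiple of 4 as the SCTP headers define it:

    #include <assert.h>

    #define WORD_ROUND(s) (((s) + 3U) & ~3U)    /* round up to 4 bytes */

    int main(void)
    {
            /* A 5-byte chunk needs 3 pad bytes, not 1. */
            assert(5 % 4 == 1);                 /* old formula */
            assert(WORD_ROUND(5) - 5 == 3);     /* new formula */

            /* The two only agree when chunklen % 4 is 0 or 2. */
            assert(8 % 4 == WORD_ROUND(8) - 8);
            assert(6 % 4 == WORD_ROUND(6) - 6);
            return 0;
    }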
@@ -1143,6 +1146,25 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
     return target;
 }
 
+/* Append bytes to the end of a parameter. Will panic if chunk is not big
+ * enough.
+ */
+void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data)
+{
+    void *target;
+    int chunklen = ntohs(chunk->chunk_hdr->length);
+
+    target = skb_put(chunk->skb, len);
+
+    memcpy(target, data, len);
+
+    /* Adjust the chunk length field. */
+    chunk->chunk_hdr->length = htons(chunklen + len);
+    chunk->chunk_end = skb_tail_pointer(chunk->skb);
+
+    return target;
+}
+
 /* Append bytes from user space to the end of a chunk. Will panic if
  * chunk is not big enough.
  * Returns a kernel err value.
@@ -1174,25 +1196,36 @@ out:
  */
 void sctp_chunk_assign_ssn(struct sctp_chunk *chunk)
 {
+    struct sctp_datamsg *msg;
+    struct sctp_chunk *lchunk;
+    struct sctp_stream *stream;
     __u16 ssn;
     __u16 sid;
 
     if (chunk->has_ssn)
         return;
 
-    /* This is the last possible instant to assign a SSN. */
-    if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
-        ssn = 0;
-    } else {
-        sid = ntohs(chunk->subh.data_hdr->stream);
-        if (chunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
-            ssn = sctp_ssn_next(&chunk->asoc->ssnmap->out, sid);
-        else
-            ssn = sctp_ssn_peek(&chunk->asoc->ssnmap->out, sid);
-    }
+    /* All fragments will be on the same stream */
+    sid = ntohs(chunk->subh.data_hdr->stream);
+    stream = &chunk->asoc->ssnmap->out;
+
+    /* Now assign the sequence number to the entire message.
+     * All fragments must have the same stream sequence number.
+     */
+    msg = chunk->msg;
+    list_for_each_entry(lchunk, &msg->chunks, frag_list) {
+        if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
+            ssn = 0;
+        } else {
+            if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
+                ssn = sctp_ssn_next(stream, sid);
+            else
+                ssn = sctp_ssn_peek(stream, sid);
+        }
 
-    chunk->subh.data_hdr->ssn = htons(ssn);
-    chunk->has_ssn = 1;
+        lchunk->subh.data_hdr->ssn = htons(ssn);
+        lchunk->has_ssn = 1;
+    }
 }
 
 /* Helper function to assign a TSN if needed. This assumes that both
@@ -1466,7 +1499,8 @@ no_hmac:
         __be32 n = htonl(usecs);
 
         sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE,
-                &n, sizeof(n));
+                sizeof(n));
+        sctp_addto_chunk(*errp, sizeof(n), &n);
         *error = -SCTP_IERROR_STALE_COOKIE;
     } else
         *error = -SCTP_IERROR_NOMEM;
@@ -1497,7 +1531,7 @@ no_hmac:
     /* Also, add the destination address. */
     if (list_empty(&retval->base.bind_addr.address_list)) {
         sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, 1,
-                   GFP_ATOMIC);
+                  GFP_ATOMIC);
     }
 
     retval->next_tsn = retval->c.initial_tsn;
@@ -1556,7 +1590,8 @@ static int sctp_process_missing_param(const struct sctp_association *asoc,
         report.num_missing = htonl(1);
         report.type = paramtype;
         sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM,
-                &report, sizeof(report));
+                sizeof(report));
+        sctp_addto_chunk(*errp, sizeof(report), &report);
     }
 
     /* Stop processing this chunk. */
@@ -1574,7 +1609,7 @@ static int sctp_process_inv_mandatory(const struct sctp_association *asoc,
     *errp = sctp_make_op_error_space(asoc, chunk, 0);
 
     if (*errp)
-        sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, NULL, 0);
+        sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, 0);
 
     /* Stop processing this chunk. */
     return 0;
@@ -1595,9 +1630,10 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
     *errp = sctp_make_op_error_space(asoc, chunk, payload_len);
 
     if (*errp) {
-        sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION, error,
-                sizeof(error));
-        sctp_addto_chunk(*errp, sizeof(sctp_paramhdr_t), param);
+        sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION,
+                sizeof(error) + sizeof(sctp_paramhdr_t));
+        sctp_addto_chunk(*errp, sizeof(error), error);
+        sctp_addto_param(*errp, sizeof(sctp_paramhdr_t), param);
     }
 
     return 0;
@@ -1618,9 +1654,10 @@ static int sctp_process_hn_param(const struct sctp_association *asoc,
     if (!*errp)
         *errp = sctp_make_op_error_space(asoc, chunk, len);
 
-    if (*errp)
-        sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED,
-                param.v, len);
+    if (*errp) {
+        sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len);
+        sctp_addto_chunk(*errp, len, param.v);
+    }
 
     /* Stop processing this chunk. */
     return 0;
@@ -1672,10 +1709,13 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
         *errp = sctp_make_op_error_space(asoc, chunk,
                          ntohs(chunk->chunk_hdr->length));
 
-        if (*errp)
+        if (*errp) {
             sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-                    param.v,
                     WORD_ROUND(ntohs(param.p->length)));
+            sctp_addto_chunk(*errp,
+                    WORD_ROUND(ntohs(param.p->length)),
+                    param.v);
+        }
 
         break;
     case SCTP_PARAM_ACTION_SKIP:
@@ -1690,8 +1730,10 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
 
         if (*errp) {
             sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-                    param.v,
                     WORD_ROUND(ntohs(param.p->length)));
+            sctp_addto_chunk(*errp,
+                    WORD_ROUND(ntohs(param.p->length)),
+                    param.v);
         } else {
             /* If there is no memory for generating the ERROR
              * report as specified, an ABORT will be triggered
@@ -1791,7 +1833,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
      * VIOLATION error. We build the ERROR chunk here and let the normal
      * error handling code build and send the packet.
      */
-    if (param.v < (void*)chunk->chunk_end - sizeof(sctp_paramhdr_t)) {
+    if (param.v != (void*)chunk->chunk_end) {
         sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
         return 0;
     }
@@ -2457,6 +2499,52 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
     return SCTP_ERROR_NO_ERROR;
 }
 
+/* Verify the ASCONF packet before we process it. */
+int sctp_verify_asconf(const struct sctp_association *asoc,
+               struct sctp_paramhdr *param_hdr, void *chunk_end,
+               struct sctp_paramhdr **errp) {
+    sctp_addip_param_t *asconf_param;
+    union sctp_params param;
+    int length, plen;
+
+    param.v = (sctp_paramhdr_t *) param_hdr;
+    while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) {
+        length = ntohs(param.p->length);
+        *errp = param.p;
+
+        if (param.v > chunk_end - length ||
+            length < sizeof(sctp_paramhdr_t))
+            return 0;
+
+        switch (param.p->type) {
+        case SCTP_PARAM_ADD_IP:
+        case SCTP_PARAM_DEL_IP:
+        case SCTP_PARAM_SET_PRIMARY:
+            asconf_param = (sctp_addip_param_t *)param.v;
+            plen = ntohs(asconf_param->param_hdr.length);
+            if (plen < sizeof(sctp_addip_param_t) +
+                   sizeof(sctp_paramhdr_t))
+                return 0;
+            break;
+        case SCTP_PARAM_SUCCESS_REPORT:
+        case SCTP_PARAM_ADAPTATION_LAYER_IND:
+            if (length != sizeof(sctp_addip_param_t))
+                return 0;
+
+            break;
+        default:
+            break;
+        }
+
+        param.v += WORD_ROUND(length);
+    }
+
+    if (param.v != chunk_end)
+        return 0;
+
+    return 1;
+}
+
 /* Process an incoming ASCONF chunk with the next expected serial no. and
  * return an ASCONF_ACK chunk to be sent in response.
  */
@@ -2571,22 +2659,16 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
 
     switch (asconf_param->param_hdr.type) {
     case SCTP_PARAM_ADD_IP:
-        sctp_local_bh_disable();
-        sctp_write_lock(&asoc->base.addr_lock);
-        list_for_each(pos, &bp->address_list) {
-            saddr = list_entry(pos, struct sctp_sockaddr_entry, list);
+        /* This is always done in BH context with a socket lock
+         * held, so the list can not change.
+         */
+        list_for_each_entry(saddr, &bp->address_list, list) {
             if (sctp_cmp_addr_exact(&saddr->a, &addr))
                 saddr->use_as_src = 1;
         }
-        sctp_write_unlock(&asoc->base.addr_lock);
-        sctp_local_bh_enable();
         break;
     case SCTP_PARAM_DEL_IP:
-        sctp_local_bh_disable();
-        sctp_write_lock(&asoc->base.addr_lock);
-        retval = sctp_del_bind_addr(bp, &addr);
-        sctp_write_unlock(&asoc->base.addr_lock);
-        sctp_local_bh_enable();
+        retval = sctp_del_bind_addr(bp, &addr, call_rcu_bh);
         list_for_each(pos, &asoc->peer.transport_addr_list) {
             transport = list_entry(pos, struct sctp_transport,
                          transports);
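The new sctp_verify_asconf() above is a bounds-checked TLV walk: every parameter length must cover at least a header, must not run past the chunk, and the walk has to land exactly on chunk_end. A compact userspace model of the same loop, with a hypothetical param_hdr type rather than the kernel's sctp_paramhdr_t:

    #include <arpa/inet.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct param_hdr {              /* hypothetical TLV header */
            uint16_t type;
            uint16_t length;        /* includes the header, network order */
    };

    #define PAD4(len) (((len) + 3U) & ~3U)

    /* Return 1 if every TLV fits inside [buf, end) and the walk ends
     * exactly at end, 0 otherwise.
     */
    static int verify_params(const uint8_t *buf, const uint8_t *end)
    {
            const uint8_t *p = buf;

            while ((size_t)(end - p) >= sizeof(struct param_hdr)) {
                    struct param_hdr hdr;
                    size_t len;

                    memcpy(&hdr, p, sizeof(hdr));   /* avoid unaligned reads */
                    len = ntohs(hdr.length);
                    if (len < sizeof(struct param_hdr) ||
                        len > (size_t)(end - p))
                            return 0;               /* bogus length field */
                    if (PAD4(len) > (size_t)(end - p))
                            break;                  /* padded length overruns */
                    p += PAD4(len);
            }
            return p == end;        /* must consume the buffer exactly */
    }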
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index d9fad4f6ffc3..8d7890083493 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1013,8 +1013,9 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
         break;
 
     case SCTP_DISPOSITION_VIOLATION:
-        printk(KERN_ERR "sctp protocol violation state %d "
-               "chunkid %d\n", state, subtype.chunk);
+        if (net_ratelimit())
+            printk(KERN_ERR "sctp protocol violation state %d "
+                   "chunkid %d\n", state, subtype.chunk);
         break;
 
     case SCTP_DISPOSITION_NOT_IMPL:
@@ -1130,6 +1131,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
         /* Move the Cumulattive TSN Ack ahead. */
         sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
 
+        /* purge the fragmentation queue */
+        sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);
+
         /* Abort any in progress partial delivery. */
         sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
         break;
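net_ratelimit() above is the standard guard for log messages that a remote peer can trigger once per packet. A minimal sketch of the idiom with a hypothetical message (not the SCTP one):

    #include <linux/kernel.h>
    #include <linux/net.h>          /* net_ratelimit() */

    static void report_bad_packet(int state, int chunkid)
    {
            /* net_ratelimit() returns nonzero only while the global rate
             * limit has not been exceeded, so a flood of bad packets cannot
             * also flood the kernel log.
             */
            if (net_ratelimit())
                    printk(KERN_ERR "bogus packet: state %d chunkid %d\n",
                           state, chunkid);
    }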
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 71cad56dd73f..a583d67cab63 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -90,6 +90,11 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
                          const sctp_subtype_t type,
                          void *arg,
                          sctp_cmd_seq_t *commands);
+static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
+                       const struct sctp_association *asoc,
+                       const sctp_subtype_t type,
+                       void *arg,
+                       sctp_cmd_seq_t *commands);
 static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
 
 static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
@@ -98,6 +103,7 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
                          struct sctp_transport *transport);
 
 static sctp_disposition_t sctp_sf_abort_violation(
+                     const struct sctp_endpoint *ep,
                      const struct sctp_association *asoc,
                      void *arg,
                      sctp_cmd_seq_t *commands,
@@ -111,6 +117,13 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
                      void *arg,
                      sctp_cmd_seq_t *commands);
 
+static sctp_disposition_t sctp_sf_violation_paramlen(
+                     const struct sctp_endpoint *ep,
+                     const struct sctp_association *asoc,
+                     const sctp_subtype_t type,
+                     void *arg,
+                     sctp_cmd_seq_t *commands);
+
 static sctp_disposition_t sctp_sf_violation_ctsn(
                      const struct sctp_endpoint *ep,
                      const struct sctp_association *asoc,
@@ -118,6 +131,13 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
                      void *arg,
                      sctp_cmd_seq_t *commands);
 
+static sctp_disposition_t sctp_sf_violation_chunk(
+                     const struct sctp_endpoint *ep,
+                     const struct sctp_association *asoc,
+                     const sctp_subtype_t type,
+                     void *arg,
+                     sctp_cmd_seq_t *commands);
+
 /* Small helper function that checks if the chunk length
  * is of the appropriate length. The 'required_length' argument
  * is set to be the size of a specific chunk we are testing.
@@ -181,16 +201,21 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
     struct sctp_chunk *chunk = arg;
     struct sctp_ulpevent *ev;
 
+    if (!sctp_vtag_verify_either(chunk, asoc))
+        return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+
     /* RFC 2960 6.10 Bundling
      *
      * An endpoint MUST NOT bundle INIT, INIT ACK or
      * SHUTDOWN COMPLETE with any other chunks.
      */
     if (!chunk->singleton)
-        return SCTP_DISPOSITION_VIOLATION;
+        return sctp_sf_violation_chunk(ep, asoc, type, arg, commands);
 
-    if (!sctp_vtag_verify_either(chunk, asoc))
-        return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+    /* Make sure that the SHUTDOWN_COMPLETE chunk has a valid length. */
+    if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
+        return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+                          commands);
 
     /* RFC 2960 10.2 SCTP-to-ULP
      *
@@ -264,7 +289,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
     struct sctp_chunk *err_chunk;
     struct sctp_packet *packet;
     sctp_unrecognized_param_t *unk_param;
-    struct sock *sk;
     int len;
 
     /* 6.10 Bundling
@@ -285,16 +309,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
     if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
         return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
 
-    sk = ep->base.sk;
-    /* If the endpoint is not listening or if the number of associations
-     * on the TCP-style socket exceed the max backlog, respond with an
-     * ABORT.
-     */
-    if (!sctp_sstate(sk, LISTENING) ||
-        (sctp_style(sk, TCP) &&
-         sk_acceptq_is_full(sk)))
-        return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
-
     /* 3.1 A packet containing an INIT chunk MUST have a zero Verification
      * Tag.
      */
@@ -461,17 +475,17 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
     if (!sctp_vtag_verify(chunk, asoc))
         return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
 
-    /* Make sure that the INIT-ACK chunk has a valid length */
-    if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t)))
-        return sctp_sf_violation_chunklen(ep, asoc, type, arg,
-                          commands);
     /* 6.10 Bundling
      * An endpoint MUST NOT bundle INIT, INIT ACK or
      * SHUTDOWN COMPLETE with any other chunks.
      */
     if (!chunk->singleton)
-        return SCTP_DISPOSITION_VIOLATION;
+        return sctp_sf_violation_chunk(ep, asoc, type, arg, commands);
 
+    /* Make sure that the INIT-ACK chunk has a valid length */
+    if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t)))
+        return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+                          commands);
     /* Grab the INIT header. */
     chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
 
@@ -590,12 +604,13 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
     struct sctp_ulpevent *ev, *ai_ev = NULL;
     int error = 0;
     struct sctp_chunk *err_chk_p;
+    struct sock *sk;
 
     /* If the packet is an OOTB packet which is temporarily on the
      * control endpoint, respond with an ABORT.
      */
     if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
-        return sctp_sf_ootb(ep, asoc, type, arg, commands);
+        return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
 
     /* Make sure that the COOKIE_ECHO chunk has a valid length.
      * In this case, we check that we have enough for at least a
@@ -605,6 +620,15 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep, | |||
605 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) | 620 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) |
606 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | 621 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); |
607 | 622 | ||
623 | /* If the endpoint is not listening or if the number of associations | ||
624 | * on the TCP-style socket exceeds the max backlog, respond with an | ||
625 | * ABORT. | ||
626 | */ | ||
627 | sk = ep->base.sk; | ||
628 | if (!sctp_sstate(sk, LISTENING) || | ||
629 | (sctp_style(sk, TCP) && sk_acceptq_is_full(sk))) | ||
630 | return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); | ||
631 | |||
608 | /* "Decode" the chunk. We have no optional parameters so we | 632 | /* "Decode" the chunk. We have no optional parameters so we |
609 | * are in good shape. | 633 | * are in good shape. |
610 | */ | 634 | */ |
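The check moved into the COOKIE-ECHO path above refuses to set up an association when the endpoint is not listening, or when a TCP-style socket already has a full accept backlog. A rough user-space analogue of that decision follows; the structure and field names are illustrative assumptions, and the exact backlog comparison in the kernel may differ slightly.

    #include <stdbool.h>

    /* Illustrative listener state (not the kernel's socket layout). */
    struct listener {
            bool listening;         /* socket is in the LISTEN state */
            bool tcp_style;         /* one-to-one (TCP-style) SCTP socket */
            int  backlog;           /* configured accept backlog */
            int  pending;           /* associations waiting to be accepted */
    };

    /* Mirror of the "abort instead of creating an association" decision. */
    static bool should_abort_cookie_echo(const struct listener *l)
    {
            if (!l->listening)
                    return true;
            return l->tcp_style && l->pending > l->backlog;
    }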
@@ -1032,19 +1056,21 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep, | |||
1032 | /* This should never happen, but lets log it if so. */ | 1056 | /* This should never happen, but lets log it if so. */ |
1033 | if (unlikely(!link)) { | 1057 | if (unlikely(!link)) { |
1034 | if (from_addr.sa.sa_family == AF_INET6) { | 1058 | if (from_addr.sa.sa_family == AF_INET6) { |
1035 | printk(KERN_WARNING | 1059 | if (net_ratelimit()) |
1036 | "%s association %p could not find address " | 1060 | printk(KERN_WARNING |
1037 | NIP6_FMT "\n", | 1061 | "%s association %p could not find address " |
1038 | __FUNCTION__, | 1062 | NIP6_FMT "\n", |
1039 | asoc, | 1063 | __FUNCTION__, |
1040 | NIP6(from_addr.v6.sin6_addr)); | 1064 | asoc, |
1065 | NIP6(from_addr.v6.sin6_addr)); | ||
1041 | } else { | 1066 | } else { |
1042 | printk(KERN_WARNING | 1067 | if (net_ratelimit()) |
1043 | "%s association %p could not find address " | 1068 | printk(KERN_WARNING |
1044 | NIPQUAD_FMT "\n", | 1069 | "%s association %p could not find address " |
1045 | __FUNCTION__, | 1070 | NIPQUAD_FMT "\n", |
1046 | asoc, | 1071 | __FUNCTION__, |
1047 | NIPQUAD(from_addr.v4.sin_addr.s_addr)); | 1072 | asoc, |
1073 | NIPQUAD(from_addr.v4.sin_addr.s_addr)); | ||
1048 | } | 1074 | } |
1049 | return SCTP_DISPOSITION_DISCARD; | 1075 | return SCTP_DISPOSITION_DISCARD; |
1050 | } | 1076 | } |
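The change above only wraps the existing warnings in net_ratelimit() so that a stream of unmatched HEARTBEAT-ACKs cannot flood the log. A minimal user-space sketch of such a guard is shown below; the interval and burst values are arbitrary assumptions, not the kernel's defaults.

    #include <stdio.h>
    #include <time.h>

    /* Allow at most 'burst' messages per 'interval' seconds. */
    static int ratelimit(void)
    {
            static time_t window_start;
            static int printed;
            const int interval = 5, burst = 10;
            time_t now = time(NULL);

            if (now - window_start >= interval) {
                    window_start = now;
                    printed = 0;
            }
            return printed++ < burst;
    }

    static void warn_missing_address(const char *addr)
    {
            if (ratelimit())
                    fprintf(stderr, "association could not find address %s\n", addr);
    }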
@@ -2495,6 +2521,11 @@ sctp_disposition_t sctp_sf_do_9_2_reshutack(const struct sctp_endpoint *ep, | |||
2495 | struct sctp_chunk *chunk = (struct sctp_chunk *) arg; | 2521 | struct sctp_chunk *chunk = (struct sctp_chunk *) arg; |
2496 | struct sctp_chunk *reply; | 2522 | struct sctp_chunk *reply; |
2497 | 2523 | ||
2524 | /* Make sure that the chunk has a valid length */ | ||
2525 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) | ||
2526 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
2527 | commands); | ||
2528 | |||
2498 | /* Since we are not going to really process this INIT, there | 2529 | /* Since we are not going to really process this INIT, there |
2499 | * is no point in verifying chunk boundries. Just generate | 2530 | * is no point in verifying chunk boundries. Just generate |
2500 | * the SHUTDOWN ACK. | 2531 | * the SHUTDOWN ACK. |
@@ -2928,7 +2959,7 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep, | |||
2928 | * | 2959 | * |
2929 | * The return value is the disposition of the chunk. | 2960 | * The return value is the disposition of the chunk. |
2930 | */ | 2961 | */ |
2931 | sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep, | 2962 | static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep, |
2932 | const struct sctp_association *asoc, | 2963 | const struct sctp_association *asoc, |
2933 | const sctp_subtype_t type, | 2964 | const sctp_subtype_t type, |
2934 | void *arg, | 2965 | void *arg, |
@@ -2964,6 +2995,7 @@ sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep, | |||
2964 | 2995 | ||
2965 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); | 2996 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); |
2966 | 2997 | ||
2998 | sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
2967 | return SCTP_DISPOSITION_CONSUME; | 2999 | return SCTP_DISPOSITION_CONSUME; |
2968 | } | 3000 | } |
2969 | 3001 | ||
@@ -3124,14 +3156,14 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep, | |||
3124 | 3156 | ||
3125 | ch = (sctp_chunkhdr_t *) chunk->chunk_hdr; | 3157 | ch = (sctp_chunkhdr_t *) chunk->chunk_hdr; |
3126 | do { | 3158 | do { |
3127 | /* Break out if chunk length is less than minimal. */ | 3159 | /* Report violation if the chunk is less than minimal */ |
3128 | if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t)) | 3160 | if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t)) |
3129 | break; | 3161 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, |
3130 | 3162 | commands); | |
3131 | ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); | ||
3132 | if (ch_end > skb_tail_pointer(skb)) | ||
3133 | break; | ||
3134 | 3163 | ||
3164 | /* Now that we know we at least have a chunk header, | ||
3165 | * do things that are type appropriate. | ||
3166 | */ | ||
3135 | if (SCTP_CID_SHUTDOWN_ACK == ch->type) | 3167 | if (SCTP_CID_SHUTDOWN_ACK == ch->type) |
3136 | ootb_shut_ack = 1; | 3168 | ootb_shut_ack = 1; |
3137 | 3169 | ||
@@ -3143,15 +3175,19 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep, | |||
3143 | if (SCTP_CID_ABORT == ch->type) | 3175 | if (SCTP_CID_ABORT == ch->type) |
3144 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | 3176 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); |
3145 | 3177 | ||
3178 | /* Report violation if chunk len overflows */ | ||
3179 | ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); | ||
3180 | if (ch_end > skb_tail_pointer(skb)) | ||
3181 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
3182 | commands); | ||
3183 | |||
3146 | ch = (sctp_chunkhdr_t *) ch_end; | 3184 | ch = (sctp_chunkhdr_t *) ch_end; |
3147 | } while (ch_end < skb_tail_pointer(skb)); | 3185 | } while (ch_end < skb_tail_pointer(skb)); |
3148 | 3186 | ||
3149 | if (ootb_shut_ack) | 3187 | if (ootb_shut_ack) |
3150 | sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands); | 3188 | return sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands); |
3151 | else | 3189 | else |
3152 | sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); | 3190 | return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); |
3153 | |||
3154 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
3155 | } | 3191 | } |
3156 | 3192 | ||
3157 | /* | 3193 | /* |
@@ -3217,7 +3253,11 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep, | |||
3217 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) | 3253 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) |
3218 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | 3254 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); |
3219 | 3255 | ||
3220 | return SCTP_DISPOSITION_CONSUME; | 3256 | /* We need to discard the rest of the packet to prevent |
3257 | * potential bombing attacks from additional bundled chunks. | ||
3258 | * This is documented in the SCTP Threats I-D. | ||
3259 | */ | ||
3260 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
3221 | } | 3261 | } |
3222 | 3262 | ||
3223 | return SCTP_DISPOSITION_NOMEM; | 3263 | return SCTP_DISPOSITION_NOMEM; |
@@ -3240,6 +3280,13 @@ sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep, | |||
3240 | void *arg, | 3280 | void *arg, |
3241 | sctp_cmd_seq_t *commands) | 3281 | sctp_cmd_seq_t *commands) |
3242 | { | 3282 | { |
3283 | struct sctp_chunk *chunk = arg; | ||
3284 | |||
3285 | /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */ | ||
3286 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) | ||
3287 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
3288 | commands); | ||
3289 | |||
3243 | /* Although we do have an association in this case, it corresponds | 3290 | /* Although we do have an association in this case, it corresponds |
3244 | * to a restarted association. So the packet is treated as an OOTB | 3291 | * to a restarted association. So the packet is treated as an OOTB |
3245 | * packet and the state function that handles OOTB SHUTDOWN_ACK is | 3292 | * packet and the state function that handles OOTB SHUTDOWN_ACK is |
@@ -3256,8 +3303,11 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep, | |||
3256 | { | 3303 | { |
3257 | struct sctp_chunk *chunk = arg; | 3304 | struct sctp_chunk *chunk = arg; |
3258 | struct sctp_chunk *asconf_ack = NULL; | 3305 | struct sctp_chunk *asconf_ack = NULL; |
3306 | struct sctp_paramhdr *err_param = NULL; | ||
3259 | sctp_addiphdr_t *hdr; | 3307 | sctp_addiphdr_t *hdr; |
3308 | union sctp_addr_param *addr_param; | ||
3260 | __u32 serial; | 3309 | __u32 serial; |
3310 | int length; | ||
3261 | 3311 | ||
3262 | if (!sctp_vtag_verify(chunk, asoc)) { | 3312 | if (!sctp_vtag_verify(chunk, asoc)) { |
3263 | sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, | 3313 | sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, |
@@ -3273,6 +3323,20 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep, | |||
3273 | hdr = (sctp_addiphdr_t *)chunk->skb->data; | 3323 | hdr = (sctp_addiphdr_t *)chunk->skb->data; |
3274 | serial = ntohl(hdr->serial); | 3324 | serial = ntohl(hdr->serial); |
3275 | 3325 | ||
3326 | addr_param = (union sctp_addr_param *)hdr->params; | ||
3327 | length = ntohs(addr_param->p.length); | ||
3328 | if (length < sizeof(sctp_paramhdr_t)) | ||
3329 | return sctp_sf_violation_paramlen(ep, asoc, type, | ||
3330 | (void *)addr_param, commands); | ||
3331 | |||
3332 | /* Verify the ASCONF chunk before processing it. */ | ||
3333 | if (!sctp_verify_asconf(asoc, | ||
3334 | (sctp_paramhdr_t *)((void *)addr_param + length), | ||
3335 | (void *)chunk->chunk_end, | ||
3336 | &err_param)) | ||
3337 | return sctp_sf_violation_paramlen(ep, asoc, type, | ||
3338 | (void *)&err_param, commands); | ||
3339 | |||
3276 | /* ADDIP 4.2 C1) Compare the value of the serial number to the value | 3340 | /* ADDIP 4.2 C1) Compare the value of the serial number to the value |
3277 | * the endpoint stored in a new association variable | 3341 | * the endpoint stored in a new association variable |
3278 | * 'Peer-Serial-Number'. | 3342 | * 'Peer-Serial-Number'. |
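The additions above read the leading address parameter of the ASCONF, reject an undersized length, and then let sctp_verify_asconf() walk the remaining TLV parameters before any of them are acted on. A simplified model of that TLV sanity walk is sketched below; the parameter layout and names are assumptions, not the kernel definitions.

    #include <stdint.h>
    #include <stddef.h>
    #include <arpa/inet.h>

    struct param_hdr {              /* simplified sctp_paramhdr_t (assumption) */
            uint16_t type;
            uint16_t length;        /* network byte order, >= 4 when valid */
    };

    /* Check that every parameter between 'p' and 'end' has a sane length.
     * On failure '*bad' points at the offending parameter, much like the
     * err_param out-argument used above.
     */
    static int verify_params(const uint8_t *p, const uint8_t *end,
                             const struct param_hdr **bad)
    {
            while (p + sizeof(struct param_hdr) <= end) {
                    const struct param_hdr *ph = (const struct param_hdr *)p;
                    uint16_t len = ntohs(ph->length);

                    if (len < sizeof(struct param_hdr) || p + len > end) {
                            *bad = ph;
                            return 0;
                    }
                    p += (len + 3) & ~3u;   /* parameters are 4-byte aligned */
            }
            return 1;
    }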
@@ -3327,6 +3391,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep, | |||
3327 | struct sctp_chunk *asconf_ack = arg; | 3391 | struct sctp_chunk *asconf_ack = arg; |
3328 | struct sctp_chunk *last_asconf = asoc->addip_last_asconf; | 3392 | struct sctp_chunk *last_asconf = asoc->addip_last_asconf; |
3329 | struct sctp_chunk *abort; | 3393 | struct sctp_chunk *abort; |
3394 | struct sctp_paramhdr *err_param = NULL; | ||
3330 | sctp_addiphdr_t *addip_hdr; | 3395 | sctp_addiphdr_t *addip_hdr; |
3331 | __u32 sent_serial, rcvd_serial; | 3396 | __u32 sent_serial, rcvd_serial; |
3332 | 3397 | ||
@@ -3344,6 +3409,14 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep, | |||
3344 | addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data; | 3409 | addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data; |
3345 | rcvd_serial = ntohl(addip_hdr->serial); | 3410 | rcvd_serial = ntohl(addip_hdr->serial); |
3346 | 3411 | ||
3412 | /* Verify the ASCONF-ACK chunk before processing it. */ | ||
3413 | if (!sctp_verify_asconf(asoc, | ||
3414 | (sctp_paramhdr_t *)addip_hdr->params, | ||
3415 | (void *)asconf_ack->chunk_end, | ||
3416 | &err_param)) | ||
3417 | return sctp_sf_violation_paramlen(ep, asoc, type, | ||
3418 | (void *)&err_param, commands); | ||
3419 | |||
3347 | if (last_asconf) { | 3420 | if (last_asconf) { |
3348 | addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr; | 3421 | addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr; |
3349 | sent_serial = ntohl(addip_hdr->serial); | 3422 | sent_serial = ntohl(addip_hdr->serial); |
@@ -3362,7 +3435,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep, | |||
3362 | abort = sctp_make_abort(asoc, asconf_ack, | 3435 | abort = sctp_make_abort(asoc, asconf_ack, |
3363 | sizeof(sctp_errhdr_t)); | 3436 | sizeof(sctp_errhdr_t)); |
3364 | if (abort) { | 3437 | if (abort) { |
3365 | sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, NULL, 0); | 3438 | sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0); |
3366 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | 3439 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, |
3367 | SCTP_CHUNK(abort)); | 3440 | SCTP_CHUNK(abort)); |
3368 | } | 3441 | } |
@@ -3392,7 +3465,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep, | |||
3392 | abort = sctp_make_abort(asoc, asconf_ack, | 3465 | abort = sctp_make_abort(asoc, asconf_ack, |
3393 | sizeof(sctp_errhdr_t)); | 3466 | sizeof(sctp_errhdr_t)); |
3394 | if (abort) { | 3467 | if (abort) { |
3395 | sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, NULL, 0); | 3468 | sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0); |
3396 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | 3469 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, |
3397 | SCTP_CHUNK(abort)); | 3470 | SCTP_CHUNK(abort)); |
3398 | } | 3471 | } |
@@ -3654,6 +3727,16 @@ sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep, | |||
3654 | void *arg, | 3727 | void *arg, |
3655 | sctp_cmd_seq_t *commands) | 3728 | sctp_cmd_seq_t *commands) |
3656 | { | 3729 | { |
3730 | struct sctp_chunk *chunk = arg; | ||
3731 | |||
3732 | /* Make sure that the chunk has a valid length. | ||
3733 | * Since we don't know the chunk type, we use a general | ||
3734 | * chunkhdr structure to make a comparison. | ||
3735 | */ | ||
3736 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) | ||
3737 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
3738 | commands); | ||
3739 | |||
3657 | SCTP_DEBUG_PRINTK("Chunk %d is discarded\n", type.chunk); | 3740 | SCTP_DEBUG_PRINTK("Chunk %d is discarded\n", type.chunk); |
3658 | return SCTP_DISPOSITION_DISCARD; | 3741 | return SCTP_DISPOSITION_DISCARD; |
3659 | } | 3742 | } |
@@ -3709,6 +3792,13 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep, | |||
3709 | void *arg, | 3792 | void *arg, |
3710 | sctp_cmd_seq_t *commands) | 3793 | sctp_cmd_seq_t *commands) |
3711 | { | 3794 | { |
3795 | struct sctp_chunk *chunk = arg; | ||
3796 | |||
3797 | /* Make sure that the chunk has a valid length. */ | ||
3798 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) | ||
3799 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
3800 | commands); | ||
3801 | |||
3712 | return SCTP_DISPOSITION_VIOLATION; | 3802 | return SCTP_DISPOSITION_VIOLATION; |
3713 | } | 3803 | } |
3714 | 3804 | ||
@@ -3716,12 +3806,14 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep, | |||
3716 | * Common function to handle a protocol violation. | 3806 | * Common function to handle a protocol violation. |
3717 | */ | 3807 | */ |
3718 | static sctp_disposition_t sctp_sf_abort_violation( | 3808 | static sctp_disposition_t sctp_sf_abort_violation( |
3809 | const struct sctp_endpoint *ep, | ||
3719 | const struct sctp_association *asoc, | 3810 | const struct sctp_association *asoc, |
3720 | void *arg, | 3811 | void *arg, |
3721 | sctp_cmd_seq_t *commands, | 3812 | sctp_cmd_seq_t *commands, |
3722 | const __u8 *payload, | 3813 | const __u8 *payload, |
3723 | const size_t paylen) | 3814 | const size_t paylen) |
3724 | { | 3815 | { |
3816 | struct sctp_packet *packet = NULL; | ||
3725 | struct sctp_chunk *chunk = arg; | 3817 | struct sctp_chunk *chunk = arg; |
3726 | struct sctp_chunk *abort = NULL; | 3818 | struct sctp_chunk *abort = NULL; |
3727 | 3819 | ||
@@ -3730,30 +3822,51 @@ static sctp_disposition_t sctp_sf_abort_violation( | |||
3730 | if (!abort) | 3822 | if (!abort) |
3731 | goto nomem; | 3823 | goto nomem; |
3732 | 3824 | ||
3733 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); | 3825 | if (asoc) { |
3734 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); | 3826 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); |
3827 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); | ||
3735 | 3828 | ||
3736 | if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) { | 3829 | if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) { |
3737 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | 3830 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, |
3738 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); | 3831 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); |
3739 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | 3832 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, |
3740 | SCTP_ERROR(ECONNREFUSED)); | 3833 | SCTP_ERROR(ECONNREFUSED)); |
3741 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, | 3834 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, |
3742 | SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); | 3835 | SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); |
3836 | } else { | ||
3837 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | ||
3838 | SCTP_ERROR(ECONNABORTED)); | ||
3839 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | ||
3840 | SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); | ||
3841 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | ||
3842 | } | ||
3743 | } else { | 3843 | } else { |
3744 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | 3844 | packet = sctp_ootb_pkt_new(asoc, chunk); |
3745 | SCTP_ERROR(ECONNABORTED)); | 3845 | |
3746 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | 3846 | if (!packet) |
3747 | SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); | 3847 | goto nomem_pkt; |
3748 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | 3848 | |
3849 | if (sctp_test_T_bit(abort)) | ||
3850 | packet->vtag = ntohl(chunk->sctp_hdr->vtag); | ||
3851 | |||
3852 | abort->skb->sk = ep->base.sk; | ||
3853 | |||
3854 | sctp_packet_append_chunk(packet, abort); | ||
3855 | |||
3856 | sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, | ||
3857 | SCTP_PACKET(packet)); | ||
3858 | |||
3859 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); | ||
3749 | } | 3860 | } |
3750 | 3861 | ||
3751 | sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); | 3862 | sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands); |
3752 | 3863 | ||
3753 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | 3864 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); |
3754 | 3865 | ||
3755 | return SCTP_DISPOSITION_ABORT; | 3866 | return SCTP_DISPOSITION_ABORT; |
3756 | 3867 | ||
3868 | nomem_pkt: | ||
3869 | sctp_chunk_free(abort); | ||
3757 | nomem: | 3870 | nomem: |
3758 | return SCTP_DISPOSITION_NOMEM; | 3871 | return SCTP_DISPOSITION_NOMEM; |
3759 | } | 3872 | } |
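The restructured handler above now distinguishes the case where no association exists: the ABORT is placed in a freshly built out-of-the-blue packet, the incoming verification tag is reflected when the ABORT carries the T bit, and the remainder of the offending packet is discarded in both branches. The small decision helper below restates that control flow; it is a sketch with invented names, not the kernel code.

    #include <stdbool.h>

    /* How the ABORT for a protocol violation is delivered (sketch only). */
    enum violation_action {
            VIA_REPLY_ON_ASSOC,     /* queue the ABORT on the association */
            VIA_SEND_OOTB_PACKET,   /* build an OOTB packet carrying the ABORT */
    };

    /* Decide the delivery path and whether the sender's verification tag
     * must be reflected (T bit set on the ABORT).  In either case the
     * caller then discards the rest of the packet.
     */
    static enum violation_action classify_violation(bool have_assoc,
                                                    bool abort_t_bit,
                                                    bool *reflect_vtag)
    {
            *reflect_vtag = false;
            if (have_assoc)
                    return VIA_REPLY_ON_ASSOC;

            *reflect_vtag = abort_t_bit;
            return VIA_SEND_OOTB_PACKET;
    }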
@@ -3786,7 +3899,24 @@ static sctp_disposition_t sctp_sf_violation_chunklen( | |||
3786 | { | 3899 | { |
3787 | char err_str[]="The following chunk had invalid length:"; | 3900 | char err_str[]="The following chunk had invalid length:"; |
3788 | 3901 | ||
3789 | return sctp_sf_abort_violation(asoc, arg, commands, err_str, | 3902 | return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str, |
3903 | sizeof(err_str)); | ||
3904 | } | ||
3905 | |||
3906 | /* | ||
3907 | * Handle a protocol violation when the parameter length is invalid. | ||
3908 | * "Invalid" length is identified as smaller than the minimal length a | ||
3909 | * given parameter can be. | ||
3910 | */ | ||
3911 | static sctp_disposition_t sctp_sf_violation_paramlen( | ||
3912 | const struct sctp_endpoint *ep, | ||
3913 | const struct sctp_association *asoc, | ||
3914 | const sctp_subtype_t type, | ||
3915 | void *arg, | ||
3916 | sctp_cmd_seq_t *commands) { | ||
3917 | char err_str[] = "The following parameter had invalid length:"; | ||
3918 | |||
3919 | return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str, | ||
3790 | sizeof(err_str)); | 3920 | sizeof(err_str)); |
3791 | } | 3921 | } |
3792 | 3922 | ||
@@ -3805,10 +3935,31 @@ static sctp_disposition_t sctp_sf_violation_ctsn( | |||
3805 | { | 3935 | { |
3806 | char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:"; | 3936 | char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:"; |
3807 | 3937 | ||
3808 | return sctp_sf_abort_violation(asoc, arg, commands, err_str, | 3938 | return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str, |
3809 | sizeof(err_str)); | 3939 | sizeof(err_str)); |
3810 | } | 3940 | } |
3811 | 3941 | ||
3942 | /* Handle protocol violation of an invalid chunk bundling. For example, | ||
3943 | * when we have an association and we receive bundled INIT-ACK, or | ||
3944 | * SHUTDOWN-COMPLETE, our peer is clearly violating the "MUST NOT bundle" | ||
3945 | * statement from the specs. Additionally, there might be an attacker | ||
3946 | * on the path and we may not want to continue this communication. | ||
3947 | */ | ||
3948 | static sctp_disposition_t sctp_sf_violation_chunk( | ||
3949 | const struct sctp_endpoint *ep, | ||
3950 | const struct sctp_association *asoc, | ||
3951 | const sctp_subtype_t type, | ||
3952 | void *arg, | ||
3953 | sctp_cmd_seq_t *commands) | ||
3954 | { | ||
3955 | char err_str[]="The following chunk violates protocol:"; | ||
3956 | |||
3957 | if (!asoc) | ||
3958 | return sctp_sf_violation(ep, asoc, type, arg, commands); | ||
3959 | |||
3960 | return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str, | ||
3961 | sizeof(err_str)); | ||
3962 | } | ||
3812 | /*************************************************************************** | 3963 | /*************************************************************************** |
3813 | * These are the state functions for handling primitive (Section 10) events. | 3964 | * These are the state functions for handling primitive (Section 10) events. |
3814 | ***************************************************************************/ | 3965 | ***************************************************************************/ |
@@ -5175,7 +5326,22 @@ static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc | |||
5175 | * association exists, otherwise, use the peer's vtag. | 5326 | * association exists, otherwise, use the peer's vtag. |
5176 | */ | 5327 | */ |
5177 | if (asoc) { | 5328 | if (asoc) { |
5178 | vtag = asoc->peer.i.init_tag; | 5329 | /* Special case the INIT-ACK as there is no peer's vtag |
5330 | * yet. | ||
5331 | */ | ||
5332 | switch(chunk->chunk_hdr->type) { | ||
5333 | case SCTP_CID_INIT_ACK: | ||
5334 | { | ||
5335 | sctp_initack_chunk_t *initack; | ||
5336 | |||
5337 | initack = (sctp_initack_chunk_t *)chunk->chunk_hdr; | ||
5338 | vtag = ntohl(initack->init_hdr.init_tag); | ||
5339 | break; | ||
5340 | } | ||
5341 | default: | ||
5342 | vtag = asoc->peer.i.init_tag; | ||
5343 | break; | ||
5344 | } | ||
5179 | } else { | 5345 | } else { |
5180 | /* Special case the INIT and stale COOKIE_ECHO as there is no | 5346 | /* Special case the INIT and stale COOKIE_ECHO as there is no |
5181 | * vtag yet. | 5347 | * vtag yet. |
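The hunk above special-cases a bundled INIT-ACK: since the peer's verification tag is not yet recorded in the association, the reply packet's tag is lifted from the INIT-ACK's own Initiate Tag instead. A condensed sketch of that selection is given below; the types are simplified and the chunk-type constant is assumed to match SCTP's INIT ACK identifier.

    #include <stdint.h>
    #include <arpa/inet.h>

    #define CID_INIT_ACK 2          /* SCTP chunk type for INIT ACK */

    /* Pick the verification tag for a reply packet when an association
     * exists; 'initack_init_tag_be' is the Initiate Tag as seen on the
     * wire (network byte order).
     */
    static uint32_t pick_reply_vtag(uint8_t incoming_chunk_type,
                                    uint32_t assoc_peer_vtag,
                                    uint32_t initack_init_tag_be)
    {
            if (incoming_chunk_type == CID_INIT_ACK)
                    return ntohl(initack_init_tag_be);
            return assoc_peer_vtag;
    }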
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c index 70a91ece3c49..ddb0ba3974b0 100644 --- a/net/sctp/sm_statetable.c +++ b/net/sctp/sm_statetable.c | |||
@@ -110,7 +110,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type, | |||
110 | /* SCTP_STATE_EMPTY */ \ | 110 | /* SCTP_STATE_EMPTY */ \ |
111 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ | 111 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
112 | /* SCTP_STATE_CLOSED */ \ | 112 | /* SCTP_STATE_CLOSED */ \ |
113 | TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ | 113 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
114 | /* SCTP_STATE_COOKIE_WAIT */ \ | 114 | /* SCTP_STATE_COOKIE_WAIT */ \ |
115 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ | 115 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
116 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 116 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
@@ -173,7 +173,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type, | |||
173 | /* SCTP_STATE_EMPTY */ \ | 173 | /* SCTP_STATE_EMPTY */ \ |
174 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ | 174 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
175 | /* SCTP_STATE_CLOSED */ \ | 175 | /* SCTP_STATE_CLOSED */ \ |
176 | TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ | 176 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
177 | /* SCTP_STATE_COOKIE_WAIT */ \ | 177 | /* SCTP_STATE_COOKIE_WAIT */ \ |
178 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ | 178 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
179 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 179 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
@@ -194,7 +194,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type, | |||
194 | /* SCTP_STATE_EMPTY */ \ | 194 | /* SCTP_STATE_EMPTY */ \ |
195 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ | 195 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
196 | /* SCTP_STATE_CLOSED */ \ | 196 | /* SCTP_STATE_CLOSED */ \ |
197 | TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ | 197 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
198 | /* SCTP_STATE_COOKIE_WAIT */ \ | 198 | /* SCTP_STATE_COOKIE_WAIT */ \ |
199 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ | 199 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
200 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 200 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
@@ -216,7 +216,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type, | |||
216 | /* SCTP_STATE_EMPTY */ \ | 216 | /* SCTP_STATE_EMPTY */ \ |
217 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ | 217 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
218 | /* SCTP_STATE_CLOSED */ \ | 218 | /* SCTP_STATE_CLOSED */ \ |
219 | TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ | 219 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
220 | /* SCTP_STATE_COOKIE_WAIT */ \ | 220 | /* SCTP_STATE_COOKIE_WAIT */ \ |
221 | TYPE_SCTP_FUNC(sctp_sf_violation), \ | 221 | TYPE_SCTP_FUNC(sctp_sf_violation), \ |
222 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 222 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
@@ -258,7 +258,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type, | |||
258 | /* SCTP_STATE_EMPTY */ \ | 258 | /* SCTP_STATE_EMPTY */ \ |
259 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ | 259 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
260 | /* SCTP_STATE_CLOSED */ \ | 260 | /* SCTP_STATE_CLOSED */ \ |
261 | TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ | 261 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
262 | /* SCTP_STATE_COOKIE_WAIT */ \ | 262 | /* SCTP_STATE_COOKIE_WAIT */ \ |
263 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ | 263 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
264 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 264 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
@@ -300,7 +300,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type, | |||
300 | /* SCTP_STATE_EMPTY */ \ | 300 | /* SCTP_STATE_EMPTY */ \ |
301 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ | 301 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
302 | /* SCTP_STATE_CLOSED */ \ | 302 | /* SCTP_STATE_CLOSED */ \ |
303 | TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ | 303 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
304 | /* SCTP_STATE_COOKIE_WAIT */ \ | 304 | /* SCTP_STATE_COOKIE_WAIT */ \ |
305 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ | 305 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
306 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 306 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
@@ -499,7 +499,7 @@ static const sctp_sm_table_entry_t addip_chunk_event_table[SCTP_NUM_ADDIP_CHUNK_ | |||
499 | /* SCTP_STATE_EMPTY */ \ | 499 | /* SCTP_STATE_EMPTY */ \ |
500 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ | 500 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
501 | /* SCTP_STATE_CLOSED */ \ | 501 | /* SCTP_STATE_CLOSED */ \ |
502 | TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ | 502 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
503 | /* SCTP_STATE_COOKIE_WAIT */ \ | 503 | /* SCTP_STATE_COOKIE_WAIT */ \ |
504 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ | 504 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
505 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 505 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
@@ -528,7 +528,7 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = { | |||
528 | /* SCTP_STATE_EMPTY */ | 528 | /* SCTP_STATE_EMPTY */ |
529 | TYPE_SCTP_FUNC(sctp_sf_ootb), | 529 | TYPE_SCTP_FUNC(sctp_sf_ootb), |
530 | /* SCTP_STATE_CLOSED */ | 530 | /* SCTP_STATE_CLOSED */ |
531 | TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), | 531 | TYPE_SCTP_FUNC(sctp_sf_ootb), |
532 | /* SCTP_STATE_COOKIE_WAIT */ | 532 | /* SCTP_STATE_COOKIE_WAIT */ |
533 | TYPE_SCTP_FUNC(sctp_sf_unk_chunk), | 533 | TYPE_SCTP_FUNC(sctp_sf_unk_chunk), |
534 | /* SCTP_STATE_COOKIE_ECHOED */ | 534 | /* SCTP_STATE_COOKIE_ECHOED */ |
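Every table edit above swaps the CLOSED-state handler from the now-static sctp_sf_tabort_8_4_8() to sctp_sf_ootb(), so out-of-the-blue packets get the full chunk-walking treatment before any ABORT is sent. The dispatch itself is a two-dimensional array of function pointers indexed by chunk type and state; a toy version of that lookup is sketched below with made-up states and handlers.

    #include <stdio.h>

    enum state { ST_CLOSED, ST_ESTABLISHED, ST_NUM };
    enum chunk { CK_DATA, CK_HEARTBEAT, CK_NUM };

    typedef const char *(*handler_t)(void);

    static const char *handle_ootb(void)        { return "ootb"; }
    static const char *handle_established(void) { return "deliver"; }

    /* handler = table[chunk][state], mirroring sctp_sm_lookup_event(). */
    static const handler_t table[CK_NUM][ST_NUM] = {
            [CK_DATA]      = { [ST_CLOSED]      = handle_ootb,
                               [ST_ESTABLISHED] = handle_established },
            [CK_HEARTBEAT] = { [ST_CLOSED]      = handle_ootb,
                               [ST_ESTABLISHED] = handle_established },
    };

    int main(void)
    {
            printf("%s\n", table[CK_DATA][ST_CLOSED]());    /* prints "ootb" */
            return 0;
    }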
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 01c6364245b7..772fbfb4bfda 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -353,6 +353,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) | |||
353 | * The function sctp_get_port_local() does duplicate address | 353 | * The function sctp_get_port_local() does duplicate address |
354 | * detection. | 354 | * detection. |
355 | */ | 355 | */ |
356 | addr->v4.sin_port = htons(snum); | ||
356 | if ((ret = sctp_get_port_local(sk, addr))) { | 357 | if ((ret = sctp_get_port_local(sk, addr))) { |
357 | if (ret == (long) sk) { | 358 | if (ret == (long) sk) { |
358 | /* This endpoint has a conflicting address. */ | 359 | /* This endpoint has a conflicting address. */ |
@@ -366,14 +367,10 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) | |||
366 | if (!bp->port) | 367 | if (!bp->port) |
367 | bp->port = inet_sk(sk)->num; | 368 | bp->port = inet_sk(sk)->num; |
368 | 369 | ||
369 | /* Add the address to the bind address list. */ | 370 | /* Add the address to the bind address list. |
370 | sctp_local_bh_disable(); | 371 | * Use GFP_ATOMIC since BHs will be disabled. |
371 | sctp_write_lock(&ep->base.addr_lock); | 372 | */ |
372 | |||
373 | /* Use GFP_ATOMIC since BHs are disabled. */ | ||
374 | ret = sctp_add_bind_addr(bp, addr, 1, GFP_ATOMIC); | 373 | ret = sctp_add_bind_addr(bp, addr, 1, GFP_ATOMIC); |
375 | sctp_write_unlock(&ep->base.addr_lock); | ||
376 | sctp_local_bh_enable(); | ||
377 | 374 | ||
378 | /* Copy back into socket for getsockname() use. */ | 375 | /* Copy back into socket for getsockname() use. */ |
379 | if (!ret) { | 376 | if (!ret) { |
@@ -543,15 +540,12 @@ static int sctp_send_asconf_add_ip(struct sock *sk, | |||
543 | if (i < addrcnt) | 540 | if (i < addrcnt) |
544 | continue; | 541 | continue; |
545 | 542 | ||
546 | /* Use the first address in bind addr list of association as | 543 | /* Use the first valid address in bind addr list of |
547 | * Address Parameter of ASCONF CHUNK. | 544 | * association as Address Parameter of ASCONF CHUNK. |
548 | */ | 545 | */ |
549 | sctp_read_lock(&asoc->base.addr_lock); | ||
550 | bp = &asoc->base.bind_addr; | 546 | bp = &asoc->base.bind_addr; |
551 | p = bp->address_list.next; | 547 | p = bp->address_list.next; |
552 | laddr = list_entry(p, struct sctp_sockaddr_entry, list); | 548 | laddr = list_entry(p, struct sctp_sockaddr_entry, list); |
553 | sctp_read_unlock(&asoc->base.addr_lock); | ||
554 | |||
555 | chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs, | 549 | chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs, |
556 | addrcnt, SCTP_PARAM_ADD_IP); | 550 | addrcnt, SCTP_PARAM_ADD_IP); |
557 | if (!chunk) { | 551 | if (!chunk) { |
@@ -566,8 +560,6 @@ static int sctp_send_asconf_add_ip(struct sock *sk, | |||
566 | /* Add the new addresses to the bind address list with | 560 | /* Add the new addresses to the bind address list with |
567 | * use_as_src set to 0. | 561 | * use_as_src set to 0. |
568 | */ | 562 | */ |
569 | sctp_local_bh_disable(); | ||
570 | sctp_write_lock(&asoc->base.addr_lock); | ||
571 | addr_buf = addrs; | 563 | addr_buf = addrs; |
572 | for (i = 0; i < addrcnt; i++) { | 564 | for (i = 0; i < addrcnt; i++) { |
573 | addr = (union sctp_addr *)addr_buf; | 565 | addr = (union sctp_addr *)addr_buf; |
@@ -577,8 +569,6 @@ static int sctp_send_asconf_add_ip(struct sock *sk, | |||
577 | GFP_ATOMIC); | 569 | GFP_ATOMIC); |
578 | addr_buf += af->sockaddr_len; | 570 | addr_buf += af->sockaddr_len; |
579 | } | 571 | } |
580 | sctp_write_unlock(&asoc->base.addr_lock); | ||
581 | sctp_local_bh_enable(); | ||
582 | } | 572 | } |
583 | 573 | ||
584 | out: | 574 | out: |
@@ -650,13 +640,7 @@ static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt) | |||
650 | * socket routing and failover schemes. Refer to comments in | 640 | * socket routing and failover schemes. Refer to comments in |
651 | * sctp_do_bind(). -daisy | 641 | * sctp_do_bind(). -daisy |
652 | */ | 642 | */ |
653 | sctp_local_bh_disable(); | 643 | retval = sctp_del_bind_addr(bp, sa_addr, call_rcu); |
654 | sctp_write_lock(&ep->base.addr_lock); | ||
655 | |||
656 | retval = sctp_del_bind_addr(bp, sa_addr); | ||
657 | |||
658 | sctp_write_unlock(&ep->base.addr_lock); | ||
659 | sctp_local_bh_enable(); | ||
660 | 644 | ||
661 | addr_buf += af->sockaddr_len; | 645 | addr_buf += af->sockaddr_len; |
662 | err_bindx_rem: | 646 | err_bindx_rem: |
@@ -747,14 +731,16 @@ static int sctp_send_asconf_del_ip(struct sock *sk, | |||
747 | * make sure that we do not delete all the addresses in the | 731 | * make sure that we do not delete all the addresses in the |
748 | * association. | 732 | * association. |
749 | */ | 733 | */ |
750 | sctp_read_lock(&asoc->base.addr_lock); | ||
751 | bp = &asoc->base.bind_addr; | 734 | bp = &asoc->base.bind_addr; |
752 | laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs, | 735 | laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs, |
753 | addrcnt, sp); | 736 | addrcnt, sp); |
754 | sctp_read_unlock(&asoc->base.addr_lock); | ||
755 | if (!laddr) | 737 | if (!laddr) |
756 | continue; | 738 | continue; |
757 | 739 | ||
740 | /* We do not need RCU protection throughout this loop | ||
741 | * because this is done under a socket lock from the | ||
742 | * setsockopt call. | ||
743 | */ | ||
758 | chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt, | 744 | chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt, |
759 | SCTP_PARAM_DEL_IP); | 745 | SCTP_PARAM_DEL_IP); |
760 | if (!chunk) { | 746 | if (!chunk) { |
@@ -765,23 +751,16 @@ static int sctp_send_asconf_del_ip(struct sock *sk, | |||
765 | /* Reset use_as_src flag for the addresses in the bind address | 751 | /* Reset use_as_src flag for the addresses in the bind address |
766 | * list that are to be deleted. | 752 | * list that are to be deleted. |
767 | */ | 753 | */ |
768 | sctp_local_bh_disable(); | ||
769 | sctp_write_lock(&asoc->base.addr_lock); | ||
770 | addr_buf = addrs; | 754 | addr_buf = addrs; |
771 | for (i = 0; i < addrcnt; i++) { | 755 | for (i = 0; i < addrcnt; i++) { |
772 | laddr = (union sctp_addr *)addr_buf; | 756 | laddr = (union sctp_addr *)addr_buf; |
773 | af = sctp_get_af_specific(laddr->v4.sin_family); | 757 | af = sctp_get_af_specific(laddr->v4.sin_family); |
774 | list_for_each(pos1, &bp->address_list) { | 758 | list_for_each_entry(saddr, &bp->address_list, list) { |
775 | saddr = list_entry(pos1, | ||
776 | struct sctp_sockaddr_entry, | ||
777 | list); | ||
778 | if (sctp_cmp_addr_exact(&saddr->a, laddr)) | 759 | if (sctp_cmp_addr_exact(&saddr->a, laddr)) |
779 | saddr->use_as_src = 0; | 760 | saddr->use_as_src = 0; |
780 | } | 761 | } |
781 | addr_buf += af->sockaddr_len; | 762 | addr_buf += af->sockaddr_len; |
782 | } | 763 | } |
783 | sctp_write_unlock(&asoc->base.addr_lock); | ||
784 | sctp_local_bh_enable(); | ||
785 | 764 | ||
786 | /* Update the route and saddr entries for all the transports | 765 | /* Update the route and saddr entries for all the transports |
787 | * as some of the addresses in the bind address list are | 766 | * as some of the addresses in the bind address list are |
@@ -4058,9 +4037,7 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len, | |||
4058 | sctp_assoc_t id; | 4037 | sctp_assoc_t id; |
4059 | struct sctp_bind_addr *bp; | 4038 | struct sctp_bind_addr *bp; |
4060 | struct sctp_association *asoc; | 4039 | struct sctp_association *asoc; |
4061 | struct list_head *pos, *temp; | ||
4062 | struct sctp_sockaddr_entry *addr; | 4040 | struct sctp_sockaddr_entry *addr; |
4063 | rwlock_t *addr_lock; | ||
4064 | int cnt = 0; | 4041 | int cnt = 0; |
4065 | 4042 | ||
4066 | if (len < sizeof(sctp_assoc_t)) | 4043 | if (len < sizeof(sctp_assoc_t)) |
@@ -4077,17 +4054,13 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len, | |||
4077 | */ | 4054 | */ |
4078 | if (0 == id) { | 4055 | if (0 == id) { |
4079 | bp = &sctp_sk(sk)->ep->base.bind_addr; | 4056 | bp = &sctp_sk(sk)->ep->base.bind_addr; |
4080 | addr_lock = &sctp_sk(sk)->ep->base.addr_lock; | ||
4081 | } else { | 4057 | } else { |
4082 | asoc = sctp_id2assoc(sk, id); | 4058 | asoc = sctp_id2assoc(sk, id); |
4083 | if (!asoc) | 4059 | if (!asoc) |
4084 | return -EINVAL; | 4060 | return -EINVAL; |
4085 | bp = &asoc->base.bind_addr; | 4061 | bp = &asoc->base.bind_addr; |
4086 | addr_lock = &asoc->base.addr_lock; | ||
4087 | } | 4062 | } |
4088 | 4063 | ||
4089 | sctp_read_lock(addr_lock); | ||
4090 | |||
4091 | /* If the endpoint is bound to 0.0.0.0 or ::0, count the valid | 4064 | /* If the endpoint is bound to 0.0.0.0 or ::0, count the valid |
4092 | * addresses from the global local address list. | 4065 | * addresses from the global local address list. |
4093 | */ | 4066 | */ |
@@ -4095,27 +4068,33 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len, | |||
4095 | addr = list_entry(bp->address_list.next, | 4068 | addr = list_entry(bp->address_list.next, |
4096 | struct sctp_sockaddr_entry, list); | 4069 | struct sctp_sockaddr_entry, list); |
4097 | if (sctp_is_any(&addr->a)) { | 4070 | if (sctp_is_any(&addr->a)) { |
4098 | list_for_each_safe(pos, temp, &sctp_local_addr_list) { | 4071 | rcu_read_lock(); |
4099 | addr = list_entry(pos, | 4072 | list_for_each_entry_rcu(addr, |
4100 | struct sctp_sockaddr_entry, | 4073 | &sctp_local_addr_list, list) { |
4101 | list); | 4074 | if (!addr->valid) |
4075 | continue; | ||
4076 | |||
4102 | if ((PF_INET == sk->sk_family) && | 4077 | if ((PF_INET == sk->sk_family) && |
4103 | (AF_INET6 == addr->a.sa.sa_family)) | 4078 | (AF_INET6 == addr->a.sa.sa_family)) |
4104 | continue; | 4079 | continue; |
4080 | |||
4105 | cnt++; | 4081 | cnt++; |
4106 | } | 4082 | } |
4083 | rcu_read_unlock(); | ||
4107 | } else { | 4084 | } else { |
4108 | cnt = 1; | 4085 | cnt = 1; |
4109 | } | 4086 | } |
4110 | goto done; | 4087 | goto done; |
4111 | } | 4088 | } |
4112 | 4089 | ||
4113 | list_for_each(pos, &bp->address_list) { | 4090 | /* Protection on the bound address list is not needed, |
4091 | * since in the socket option context we hold the socket lock, | ||
4092 | * so there is no way that the bound address list can change. | ||
4093 | */ | ||
4094 | list_for_each_entry(addr, &bp->address_list, list) { | ||
4114 | cnt ++; | 4095 | cnt ++; |
4115 | } | 4096 | } |
4116 | |||
4117 | done: | 4097 | done: |
4118 | sctp_read_unlock(addr_lock); | ||
4119 | return cnt; | 4098 | return cnt; |
4120 | } | 4099 | } |
4121 | 4100 | ||
@@ -4126,14 +4105,16 @@ static int sctp_copy_laddrs_old(struct sock *sk, __u16 port, | |||
4126 | int max_addrs, void *to, | 4105 | int max_addrs, void *to, |
4127 | int *bytes_copied) | 4106 | int *bytes_copied) |
4128 | { | 4107 | { |
4129 | struct list_head *pos, *next; | ||
4130 | struct sctp_sockaddr_entry *addr; | 4108 | struct sctp_sockaddr_entry *addr; |
4131 | union sctp_addr temp; | 4109 | union sctp_addr temp; |
4132 | int cnt = 0; | 4110 | int cnt = 0; |
4133 | int addrlen; | 4111 | int addrlen; |
4134 | 4112 | ||
4135 | list_for_each_safe(pos, next, &sctp_local_addr_list) { | 4113 | rcu_read_lock(); |
4136 | addr = list_entry(pos, struct sctp_sockaddr_entry, list); | 4114 | list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) { |
4115 | if (!addr->valid) | ||
4116 | continue; | ||
4117 | |||
4137 | if ((PF_INET == sk->sk_family) && | 4118 | if ((PF_INET == sk->sk_family) && |
4138 | (AF_INET6 == addr->a.sa.sa_family)) | 4119 | (AF_INET6 == addr->a.sa.sa_family)) |
4139 | continue; | 4120 | continue; |
@@ -4148,6 +4129,7 @@ static int sctp_copy_laddrs_old(struct sock *sk, __u16 port, | |||
4148 | cnt ++; | 4129 | cnt ++; |
4149 | if (cnt >= max_addrs) break; | 4130 | if (cnt >= max_addrs) break; |
4150 | } | 4131 | } |
4132 | rcu_read_unlock(); | ||
4151 | 4133 | ||
4152 | return cnt; | 4134 | return cnt; |
4153 | } | 4135 | } |
@@ -4155,14 +4137,16 @@ static int sctp_copy_laddrs_old(struct sock *sk, __u16 port, | |||
4155 | static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to, | 4137 | static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to, |
4156 | size_t space_left, int *bytes_copied) | 4138 | size_t space_left, int *bytes_copied) |
4157 | { | 4139 | { |
4158 | struct list_head *pos, *next; | ||
4159 | struct sctp_sockaddr_entry *addr; | 4140 | struct sctp_sockaddr_entry *addr; |
4160 | union sctp_addr temp; | 4141 | union sctp_addr temp; |
4161 | int cnt = 0; | 4142 | int cnt = 0; |
4162 | int addrlen; | 4143 | int addrlen; |
4163 | 4144 | ||
4164 | list_for_each_safe(pos, next, &sctp_local_addr_list) { | 4145 | rcu_read_lock(); |
4165 | addr = list_entry(pos, struct sctp_sockaddr_entry, list); | 4146 | list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) { |
4147 | if (!addr->valid) | ||
4148 | continue; | ||
4149 | |||
4166 | if ((PF_INET == sk->sk_family) && | 4150 | if ((PF_INET == sk->sk_family) && |
4167 | (AF_INET6 == addr->a.sa.sa_family)) | 4151 | (AF_INET6 == addr->a.sa.sa_family)) |
4168 | continue; | 4152 | continue; |
@@ -4170,8 +4154,10 @@ static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to, | |||
4170 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), | 4154 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), |
4171 | &temp); | 4155 | &temp); |
4172 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; | 4156 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; |
4173 | if (space_left < addrlen) | 4157 | if (space_left < addrlen) { |
4174 | return -ENOMEM; | 4158 | cnt = -ENOMEM; |
4159 | break; | ||
4160 | } | ||
4175 | memcpy(to, &temp, addrlen); | 4161 | memcpy(to, &temp, addrlen); |
4176 | 4162 | ||
4177 | to += addrlen; | 4163 | to += addrlen; |
@@ -4179,6 +4165,7 @@ static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to, | |||
4179 | space_left -= addrlen; | 4165 | space_left -= addrlen; |
4180 | *bytes_copied += addrlen; | 4166 | *bytes_copied += addrlen; |
4181 | } | 4167 | } |
4168 | rcu_read_unlock(); | ||
4182 | 4169 | ||
4183 | return cnt; | 4170 | return cnt; |
4184 | } | 4171 | } |
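The socket-option readers above now traverse sctp_local_addr_list under rcu_read_lock() and skip entries whose valid flag has been cleared, instead of taking the removed addr_lock. The reader-side pattern is sketched below in kernel idiom; the entry type is a trimmed-down assumption and the snippet is illustrative rather than buildable on its own.

    #include <linux/list.h>
    #include <linux/rcupdate.h>

    /* Trimmed-down stand-in for struct sctp_sockaddr_entry (assumption). */
    struct addr_entry {
            struct list_head list;
            int valid;              /* cleared before the entry is freed via RCU */
    };

    /* Count usable addresses without holding a lock: RCU protects the list
     * walk, and the per-entry valid flag hides entries being removed.
     */
    static int count_valid_addrs(struct list_head *addr_list)
    {
            struct addr_entry *addr;
            int cnt = 0;

            rcu_read_lock();
            list_for_each_entry_rcu(addr, addr_list, list) {
                    if (!addr->valid)
                            continue;
                    cnt++;
            }
            rcu_read_unlock();

            return cnt;
    }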
@@ -4191,7 +4178,6 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len, | |||
4191 | { | 4178 | { |
4192 | struct sctp_bind_addr *bp; | 4179 | struct sctp_bind_addr *bp; |
4193 | struct sctp_association *asoc; | 4180 | struct sctp_association *asoc; |
4194 | struct list_head *pos; | ||
4195 | int cnt = 0; | 4181 | int cnt = 0; |
4196 | struct sctp_getaddrs_old getaddrs; | 4182 | struct sctp_getaddrs_old getaddrs; |
4197 | struct sctp_sockaddr_entry *addr; | 4183 | struct sctp_sockaddr_entry *addr; |
@@ -4199,7 +4185,6 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len, | |||
4199 | union sctp_addr temp; | 4185 | union sctp_addr temp; |
4200 | struct sctp_sock *sp = sctp_sk(sk); | 4186 | struct sctp_sock *sp = sctp_sk(sk); |
4201 | int addrlen; | 4187 | int addrlen; |
4202 | rwlock_t *addr_lock; | ||
4203 | int err = 0; | 4188 | int err = 0; |
4204 | void *addrs; | 4189 | void *addrs; |
4205 | void *buf; | 4190 | void *buf; |
@@ -4221,13 +4206,11 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len, | |||
4221 | */ | 4206 | */ |
4222 | if (0 == getaddrs.assoc_id) { | 4207 | if (0 == getaddrs.assoc_id) { |
4223 | bp = &sctp_sk(sk)->ep->base.bind_addr; | 4208 | bp = &sctp_sk(sk)->ep->base.bind_addr; |
4224 | addr_lock = &sctp_sk(sk)->ep->base.addr_lock; | ||
4225 | } else { | 4209 | } else { |
4226 | asoc = sctp_id2assoc(sk, getaddrs.assoc_id); | 4210 | asoc = sctp_id2assoc(sk, getaddrs.assoc_id); |
4227 | if (!asoc) | 4211 | if (!asoc) |
4228 | return -EINVAL; | 4212 | return -EINVAL; |
4229 | bp = &asoc->base.bind_addr; | 4213 | bp = &asoc->base.bind_addr; |
4230 | addr_lock = &asoc->base.addr_lock; | ||
4231 | } | 4214 | } |
4232 | 4215 | ||
4233 | to = getaddrs.addrs; | 4216 | to = getaddrs.addrs; |
@@ -4241,8 +4224,6 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len, | |||
4241 | if (!addrs) | 4224 | if (!addrs) |
4242 | return -ENOMEM; | 4225 | return -ENOMEM; |
4243 | 4226 | ||
4244 | sctp_read_lock(addr_lock); | ||
4245 | |||
4246 | /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid | 4227 | /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid |
4247 | * addresses from the global local address list. | 4228 | * addresses from the global local address list. |
4248 | */ | 4229 | */ |
@@ -4258,8 +4239,11 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len, | |||
4258 | } | 4239 | } |
4259 | 4240 | ||
4260 | buf = addrs; | 4241 | buf = addrs; |
4261 | list_for_each(pos, &bp->address_list) { | 4242 | /* Protection on the bound address list is not needed since |
4262 | addr = list_entry(pos, struct sctp_sockaddr_entry, list); | 4243 | * in the socket option context we hold a socket lock and |
4244 | * thus the bound address list can't change. | ||
4245 | */ | ||
4246 | list_for_each_entry(addr, &bp->address_list, list) { | ||
4263 | memcpy(&temp, &addr->a, sizeof(temp)); | 4247 | memcpy(&temp, &addr->a, sizeof(temp)); |
4264 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); | 4248 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); |
4265 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; | 4249 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; |
@@ -4271,8 +4255,6 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len, | |||
4271 | } | 4255 | } |
4272 | 4256 | ||
4273 | copy_getaddrs: | 4257 | copy_getaddrs: |
4274 | sctp_read_unlock(addr_lock); | ||
4275 | |||
4276 | /* copy the entire address list into the user provided space */ | 4258 | /* copy the entire address list into the user provided space */ |
4277 | if (copy_to_user(to, addrs, bytes_copied)) { | 4259 | if (copy_to_user(to, addrs, bytes_copied)) { |
4278 | err = -EFAULT; | 4260 | err = -EFAULT; |
@@ -4294,7 +4276,6 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len, | |||
4294 | { | 4276 | { |
4295 | struct sctp_bind_addr *bp; | 4277 | struct sctp_bind_addr *bp; |
4296 | struct sctp_association *asoc; | 4278 | struct sctp_association *asoc; |
4297 | struct list_head *pos; | ||
4298 | int cnt = 0; | 4279 | int cnt = 0; |
4299 | struct sctp_getaddrs getaddrs; | 4280 | struct sctp_getaddrs getaddrs; |
4300 | struct sctp_sockaddr_entry *addr; | 4281 | struct sctp_sockaddr_entry *addr; |
@@ -4302,7 +4283,6 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len, | |||
4302 | union sctp_addr temp; | 4283 | union sctp_addr temp; |
4303 | struct sctp_sock *sp = sctp_sk(sk); | 4284 | struct sctp_sock *sp = sctp_sk(sk); |
4304 | int addrlen; | 4285 | int addrlen; |
4305 | rwlock_t *addr_lock; | ||
4306 | int err = 0; | 4286 | int err = 0; |
4307 | size_t space_left; | 4287 | size_t space_left; |
4308 | int bytes_copied = 0; | 4288 | int bytes_copied = 0; |
@@ -4323,13 +4303,11 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len, | |||
4323 | */ | 4303 | */ |
4324 | if (0 == getaddrs.assoc_id) { | 4304 | if (0 == getaddrs.assoc_id) { |
4325 | bp = &sctp_sk(sk)->ep->base.bind_addr; | 4305 | bp = &sctp_sk(sk)->ep->base.bind_addr; |
4326 | addr_lock = &sctp_sk(sk)->ep->base.addr_lock; | ||
4327 | } else { | 4306 | } else { |
4328 | asoc = sctp_id2assoc(sk, getaddrs.assoc_id); | 4307 | asoc = sctp_id2assoc(sk, getaddrs.assoc_id); |
4329 | if (!asoc) | 4308 | if (!asoc) |
4330 | return -EINVAL; | 4309 | return -EINVAL; |
4331 | bp = &asoc->base.bind_addr; | 4310 | bp = &asoc->base.bind_addr; |
4332 | addr_lock = &asoc->base.addr_lock; | ||
4333 | } | 4311 | } |
4334 | 4312 | ||
4335 | to = optval + offsetof(struct sctp_getaddrs,addrs); | 4313 | to = optval + offsetof(struct sctp_getaddrs,addrs); |
@@ -4339,8 +4317,6 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len, | |||
4339 | if (!addrs) | 4317 | if (!addrs) |
4340 | return -ENOMEM; | 4318 | return -ENOMEM; |
4341 | 4319 | ||
4342 | sctp_read_lock(addr_lock); | ||
4343 | |||
4344 | /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid | 4320 | /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid |
4345 | * addresses from the global local address list. | 4321 | * addresses from the global local address list. |
4346 | */ | 4322 | */ |
@@ -4352,21 +4328,24 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len, | |||
4352 | space_left, &bytes_copied); | 4328 | space_left, &bytes_copied); |
4353 | if (cnt < 0) { | 4329 | if (cnt < 0) { |
4354 | err = cnt; | 4330 | err = cnt; |
4355 | goto error_lock; | 4331 | goto out; |
4356 | } | 4332 | } |
4357 | goto copy_getaddrs; | 4333 | goto copy_getaddrs; |
4358 | } | 4334 | } |
4359 | } | 4335 | } |
4360 | 4336 | ||
4361 | buf = addrs; | 4337 | buf = addrs; |
4362 | list_for_each(pos, &bp->address_list) { | 4338 | /* Protection on the bound address list is not needed since |
4363 | addr = list_entry(pos, struct sctp_sockaddr_entry, list); | 4339 | * in the socket option context we hold a socket lock and |
4340 | * thus the bound address list can't change. | ||
4341 | */ | ||
4342 | list_for_each_entry(addr, &bp->address_list, list) { | ||
4364 | memcpy(&temp, &addr->a, sizeof(temp)); | 4343 | memcpy(&temp, &addr->a, sizeof(temp)); |
4365 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); | 4344 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); |
4366 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; | 4345 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; |
4367 | if (space_left < addrlen) { | 4346 | if (space_left < addrlen) { |
4368 | err = -ENOMEM; /*fixme: right error?*/ | 4347 | err = -ENOMEM; /*fixme: right error?*/ |
4369 | goto error_lock; | 4348 | goto out; |
4370 | } | 4349 | } |
4371 | memcpy(buf, &temp, addrlen); | 4350 | memcpy(buf, &temp, addrlen); |
4372 | buf += addrlen; | 4351 | buf += addrlen; |
@@ -4376,8 +4355,6 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len, | |||
4376 | } | 4355 | } |
4377 | 4356 | ||
4378 | copy_getaddrs: | 4357 | copy_getaddrs: |
4379 | sctp_read_unlock(addr_lock); | ||
4380 | |||
4381 | if (copy_to_user(to, addrs, bytes_copied)) { | 4358 | if (copy_to_user(to, addrs, bytes_copied)) { |
4382 | err = -EFAULT; | 4359 | err = -EFAULT; |
4383 | goto out; | 4360 | goto out; |
@@ -4388,12 +4365,6 @@ copy_getaddrs: | |||
4388 | } | 4365 | } |
4389 | if (put_user(bytes_copied, optlen)) | 4366 | if (put_user(bytes_copied, optlen)) |
4390 | err = -EFAULT; | 4367 | err = -EFAULT; |
4391 | |||
4392 | goto out; | ||
4393 | |||
4394 | error_lock: | ||
4395 | sctp_read_unlock(addr_lock); | ||
4396 | |||
4397 | out: | 4368 | out: |
4398 | kfree(addrs); | 4369 | kfree(addrs); |
4399 | return err; | 4370 | return err; |
@@ -5202,6 +5173,7 @@ SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog) | |||
5202 | 5173 | ||
5203 | sctp_unhash_endpoint(ep); | 5174 | sctp_unhash_endpoint(ep); |
5204 | sk->sk_state = SCTP_SS_CLOSED; | 5175 | sk->sk_state = SCTP_SS_CLOSED; |
5176 | return 0; | ||
5205 | } | 5177 | } |
5206 | 5178 | ||
5207 | /* Return if we are already listening. */ | 5179 | /* Return if we are already listening. */ |
@@ -5249,6 +5221,7 @@ SCTP_STATIC int sctp_stream_listen(struct sock *sk, int backlog) | |||
5249 | 5221 | ||
5250 | sctp_unhash_endpoint(ep); | 5222 | sctp_unhash_endpoint(ep); |
5251 | sk->sk_state = SCTP_SS_CLOSED; | 5223 | sk->sk_state = SCTP_SS_CLOSED; |
5224 | return 0; | ||
5252 | } | 5225 | } |
5253 | 5226 | ||
5254 | if (sctp_sstate(sk, LISTENING)) | 5227 | if (sctp_sstate(sk, LISTENING)) |
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index 34eb977a204d..fa0ba2a5564e 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c | |||
@@ -659,6 +659,46 @@ done: | |||
659 | return retval; | 659 | return retval; |
660 | } | 660 | } |
661 | 661 | ||
662 | /* | ||
663 | * Flush out stale fragments from the reassembly queue when processing | ||
664 | * a Forward TSN. | ||
665 | * | ||
666 | * RFC 3758, Section 3.6 | ||
667 | * | ||
668 | * After receiving and processing a FORWARD TSN, the data receiver MUST | ||
669 | * take cautions in updating its re-assembly queue. The receiver MUST | ||
670 | * remove any partially reassembled message, which is still missing one | ||
671 | * or more TSNs earlier than or equal to the new cumulative TSN point. | ||
672 | * In the event that the receiver has invoked the partial delivery API, | ||
673 | * a notification SHOULD also be generated to inform the upper layer API | ||
674 | * that the message being partially delivered will NOT be completed. | ||
675 | */ | ||
676 | void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn) | ||
677 | { | ||
678 | struct sk_buff *pos, *tmp; | ||
679 | struct sctp_ulpevent *event; | ||
680 | __u32 tsn; | ||
681 | |||
682 | if (skb_queue_empty(&ulpq->reasm)) | ||
683 | return; | ||
684 | |||
685 | skb_queue_walk_safe(&ulpq->reasm, pos, tmp) { | ||
686 | event = sctp_skb2event(pos); | ||
687 | tsn = event->tsn; | ||
688 | |||
689 | /* Since the entire message must be abandoned by the | ||
690 | * sender (item A3 in Section 3.5, RFC 3758), we can | ||
691 | * free all fragments on the list that are less than | ||
692 | * or equal to ctsn_point | ||
693 | */ | ||
694 | if (TSN_lte(tsn, fwd_tsn)) { | ||
695 | __skb_unlink(pos, &ulpq->reasm); | ||
696 | sctp_ulpevent_free(event); | ||
697 | } else | ||
698 | break; | ||
699 | } | ||
700 | } | ||
701 | |||
662 | /* Helper function to gather skbs that have possibly become | 702 | /* Helper function to gather skbs that have possibly become |
663 | * ordered by an incoming chunk. | 703 | * ordered by an incoming chunk. |
664 | */ | 704 | */ |
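Note: the new sctp_ulpq_reasm_flushtsn() walks the TSN-ordered reassembly queue, frees every fragment at or below the new cumulative TSN point, and stops at the first fragment beyond it. The ordering test is 32-bit serial-number arithmetic; the standalone sketch below spells out an assumed TSN_lte()-style comparison and the break-on-first-greater walk:

#include <stdint.h>
#include <stdio.h>

/* Assumed definition: TSN comparisons use 32-bit serial-number
 * arithmetic so they stay correct across wrap-around.
 */
static inline int tsn_lte(uint32_t a, uint32_t b)
{
	return a == b || (int32_t)(a - b) < 0;
}

int main(void)
{
	/* Toy "reassembly queue": fragments sorted by TSN, wrapping. */
	uint32_t frags[] = { 0xfffffffe, 0xffffffff, 0, 1, 5 };
	uint32_t fwd_tsn = 1;	/* new cumulative TSN point */
	size_t i, n = sizeof(frags) / sizeof(frags[0]);

	for (i = 0; i < n; i++) {
		if (!tsn_lte(frags[i], fwd_tsn))
			break;	/* first fragment past the cum-TSN point */
		printf("flush TSN %u\n", (unsigned int)frags[i]);
	}
	printf("%zu fragment(s) kept\n", n - i);
	return 0;
}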
@@ -794,7 +834,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq, | |||
794 | /* Helper function to gather skbs that have possibly become | 834 | /* Helper function to gather skbs that have possibly become |
795 | * ordered by forward tsn skipping their dependencies. | 835 | * ordered by forward tsn skipping their dependencies. |
796 | */ | 836 | */ |
797 | static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq) | 837 | static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid) |
798 | { | 838 | { |
799 | struct sk_buff *pos, *tmp; | 839 | struct sk_buff *pos, *tmp; |
800 | struct sctp_ulpevent *cevent; | 840 | struct sctp_ulpevent *cevent; |
@@ -813,31 +853,40 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq) | |||
813 | csid = cevent->stream; | 853 | csid = cevent->stream; |
814 | cssn = cevent->ssn; | 854 | cssn = cevent->ssn; |
815 | 855 | ||
816 | if (cssn != sctp_ssn_peek(in, csid)) | 856 | /* Have we gone too far? */ |
857 | if (csid > sid) | ||
817 | break; | 858 | break; |
818 | 859 | ||
819 | /* Found it, so mark in the ssnmap. */ | 860 | /* Have we not gone far enough? */ |
820 | sctp_ssn_next(in, csid); | 861 | if (csid < sid) |
862 | continue; | ||
863 | |||
864 | /* see if this ssn has been marked by skipping */ | ||
865 | if (!SSN_lt(cssn, sctp_ssn_peek(in, csid))) | ||
866 | break; | ||
821 | 867 | ||
822 | __skb_unlink(pos, &ulpq->lobby); | 868 | __skb_unlink(pos, &ulpq->lobby); |
823 | if (!event) { | 869 | if (!event) |
824 | /* Create a temporary list to collect chunks on. */ | 870 | /* Create a temporary list to collect chunks on. */ |
825 | event = sctp_skb2event(pos); | 871 | event = sctp_skb2event(pos); |
826 | __skb_queue_tail(&temp, sctp_event2skb(event)); | 872 | |
827 | } else { | 873 | /* Attach all gathered skbs to the event. */ |
828 | /* Attach all gathered skbs to the event. */ | 874 | __skb_queue_tail(&temp, pos); |
829 | __skb_queue_tail(&temp, pos); | ||
830 | } | ||
831 | } | 875 | } |
832 | 876 | ||
833 | /* Send event to the ULP. 'event' is the sctp_ulpevent for | 877 | /* Send event to the ULP. 'event' is the sctp_ulpevent for |
834 | * very first SKB on the 'temp' list. | 878 | * very first SKB on the 'temp' list. |
835 | */ | 879 | */ |
836 | if (event) | 880 | if (event) { |
881 | /* see if we have more ordered that we can deliver */ | ||
882 | sctp_ulpq_retrieve_ordered(ulpq, event); | ||
837 | sctp_ulpq_tail_event(ulpq, event); | 883 | sctp_ulpq_tail_event(ulpq, event); |
884 | } | ||
838 | } | 885 | } |
839 | 886 | ||
840 | /* Skip over an SSN. */ | 887 | /* Skip over an SSN. This is used during the processing of |
888 | * a Forward TSN chunk to skip over the abandoned ordered data | ||
889 | */ | ||
841 | void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn) | 890 | void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn) |
842 | { | 891 | { |
843 | struct sctp_stream *in; | 892 | struct sctp_stream *in; |
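Note: in the reworked sctp_ulpq_reap_ordered(), the lobby is scanned in stream-id order: entries for lower streams are skipped, the walk stops at the first higher stream, and within the named stream only events whose SSN has already been passed by the skip (SSN_lt() against the peeked next-expected SSN) are gathered for delivery. A toy model of that selection, with an assumed 16-bit serial comparison:

#include <stdint.h>
#include <stdio.h>

/* Assumed 16-bit serial comparison, analogous to the kernel's SSN_lt(). */
static inline int ssn_lt(uint16_t a, uint16_t b)
{
	return (int16_t)(a - b) < 0;
}

struct lobby_entry { uint16_t sid, ssn; };

int main(void)
{
	/* Toy lobby, kept sorted by stream id as the real queue is. */
	struct lobby_entry lobby[] = {
		{ 1, 7 }, { 2, 3 }, { 2, 4 }, { 2, 9 }, { 3, 0 },
	};
	uint16_t sid = 2;	/* stream named in the Forward TSN skip */
	uint16_t next_ssn = 5;	/* next-expected SSN after the skip */
	size_t i;

	for (i = 0; i < sizeof(lobby) / sizeof(lobby[0]); i++) {
		if (lobby[i].sid > sid)
			break;		/* gone too far */
		if (lobby[i].sid < sid)
			continue;	/* not far enough */
		if (!ssn_lt(lobby[i].ssn, next_ssn))
			break;		/* not yet skipped past */
		printf("deliver sid %u ssn %u\n",
		       (unsigned int)lobby[i].sid, (unsigned int)lobby[i].ssn);
	}
	return 0;
}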
@@ -855,7 +904,7 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn) | |||
855 | /* Go find any other chunks that were waiting for | 904 | /* Go find any other chunks that were waiting for |
856 | * ordering and deliver them if needed. | 905 | * ordering and deliver them if needed. |
857 | */ | 906 | */ |
858 | sctp_ulpq_reap_ordered(ulpq); | 907 | sctp_ulpq_reap_ordered(ulpq, sid); |
859 | return; | 908 | return; |
860 | } | 909 | } |
861 | 910 | ||
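Note: per RFC 3758 a FORWARD TSN chunk carries a list of (stream, SSN) pairs, and the receive path calls sctp_ulpq_skip() once per pair before reaping any newly deliverable ordered data. The advance of the per-stream expected SSN happens in the part of sctp_ulpq_skip() not shown in this hunk; the sketch below models it as a plain array with a hypothetical skip_ssn() helper:

#include <stdint.h>
#include <stdio.h>

#define NSTREAMS 4

static inline int ssn_lt(uint16_t a, uint16_t b)
{
	return (int16_t)(a - b) < 0;
}

/* Toy model of the inbound SSN map: next expected SSN per stream. */
static uint16_t next_ssn[NSTREAMS];

/* Assumed behaviour of the skip: move the expectation past the
 * abandoned SSN, but never backwards.
 */
static void skip_ssn(uint16_t sid, uint16_t ssn)
{
	if (!ssn_lt(ssn, next_ssn[sid]))
		next_ssn[sid] = ssn + 1;
}

int main(void)
{
	/* (stream, SSN) pairs as carried in a FORWARD TSN chunk. */
	struct { uint16_t sid, ssn; } skips[] = { { 0, 3 }, { 2, 10 } };
	size_t i;

	for (i = 0; i < sizeof(skips) / sizeof(skips[0]); i++)
		skip_ssn(skips[i].sid, skips[i].ssn);

	for (i = 0; i < NSTREAMS; i++)
		printf("stream %zu: next expected SSN %u\n",
		       i, (unsigned int)next_ssn[i]);
	return 0;
}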