Diffstat (limited to 'net/sctp')

 net/sctp/associola.c     | 211
 net/sctp/ipv6.c          |   2
 net/sctp/sm_make_chunk.c |   4
 net/sctp/sm_sideeffect.c |   7
 net/sctp/sm_statefuns.c  |  12
 net/sctp/socket.c        |  47
 net/sctp/sysctl.c        |  18
 net/sctp/ulpevent.c      |   8
 8 files changed, 160 insertions(+), 149 deletions(-)
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 5ae609200674..ee13d28d39d1 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1239,78 +1239,107 @@ void sctp_assoc_update(struct sctp_association *asoc,
 }
 
 /* Update the retran path for sending a retransmitted packet.
- * Round-robin through the active transports, else round-robin
- * through the inactive transports as this is the next best thing
- * we can try.
+ * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints:
+ *
+ *   When there is outbound data to send and the primary path
+ *   becomes inactive (e.g., due to failures), or where the
+ *   SCTP user explicitly requests to send data to an
+ *   inactive destination transport address, before reporting
+ *   an error to its ULP, the SCTP endpoint should try to send
+ *   the data to an alternate active destination transport
+ *   address if one exists.
+ *
+ *   When retransmitting data that timed out, if the endpoint
+ *   is multihomed, it should consider each source-destination
+ *   address pair in its retransmission selection policy.
+ *   When retransmitting timed-out data, the endpoint should
+ *   attempt to pick the most divergent source-destination
+ *   pair from the original source-destination pair to which
+ *   the packet was transmitted.
+ *
+ *   Note: Rules for picking the most divergent source-destination
+ *   pair are an implementation decision and are not specified
+ *   within this document.
+ *
+ * Our basic strategy is to round-robin transports in priorities
+ * according to sctp_trans_state_to_prio_map[] e.g., if no such
+ * transport with state SCTP_ACTIVE exists, round-robin through
+ * SCTP_UNKNOWN, etc. You get the picture.
  */
-void sctp_assoc_update_retran_path(struct sctp_association *asoc)
+static const u8 sctp_trans_state_to_prio_map[] = {
+	[SCTP_ACTIVE]	= 3,	/* best case */
+	[SCTP_UNKNOWN]	= 2,
+	[SCTP_PF]	= 1,
+	[SCTP_INACTIVE] = 0,	/* worst case */
+};
+
+static u8 sctp_trans_score(const struct sctp_transport *trans)
 {
-	struct sctp_transport *t, *next;
-	struct list_head *head = &asoc->peer.transport_addr_list;
-	struct list_head *pos;
+	return sctp_trans_state_to_prio_map[trans->state];
+}
 
-	if (asoc->peer.transport_count == 1)
-		return;
+static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
+						    struct sctp_transport *best)
+{
+	if (best == NULL)
+		return curr;
 
-	/* Find the next transport in a round-robin fashion. */
-	t = asoc->peer.retran_path;
-	pos = &t->transports;
-	next = NULL;
+	return sctp_trans_score(curr) > sctp_trans_score(best) ? curr : best;
+}
 
-	while (1) {
-		/* Skip the head. */
-		if (pos->next == head)
-			pos = head->next;
-		else
-			pos = pos->next;
+void sctp_assoc_update_retran_path(struct sctp_association *asoc)
+{
+	struct sctp_transport *trans = asoc->peer.retran_path;
+	struct sctp_transport *trans_next = NULL;
 
-		t = list_entry(pos, struct sctp_transport, transports);
+	/* We're done as we only have the one and only path. */
+	if (asoc->peer.transport_count == 1)
+		return;
+	/* If active_path and retran_path are the same and active,
+	 * then this is the only active path. Use it.
+	 */
+	if (asoc->peer.active_path == asoc->peer.retran_path &&
+	    asoc->peer.active_path->state == SCTP_ACTIVE)
+		return;
 
-		/* We have exhausted the list, but didn't find any
-		 * other active transports.  If so, use the next
-		 * transport.
-		 */
-		if (t == asoc->peer.retran_path) {
-			t = next;
+	/* Iterate from retran_path's successor back to retran_path. */
+	for (trans = list_next_entry(trans, transports); 1;
+	     trans = list_next_entry(trans, transports)) {
+		/* Manually skip the head element. */
+		if (&trans->transports == &asoc->peer.transport_addr_list)
+			continue;
+		if (trans->state == SCTP_UNCONFIRMED)
+			continue;
+		trans_next = sctp_trans_elect_best(trans, trans_next);
+		/* Active is good enough for immediate return. */
+		if (trans_next->state == SCTP_ACTIVE)
 			break;
-		}
-
-		/* Try to find an active transport. */
-
-		if ((t->state == SCTP_ACTIVE) ||
-		    (t->state == SCTP_UNKNOWN)) {
+		/* We've reached the end, time to update path. */
+		if (trans == asoc->peer.retran_path)
 			break;
-		} else {
-			/* Keep track of the next transport in case
-			 * we don't find any active transport.
-			 */
-			if (t->state != SCTP_UNCONFIRMED && !next)
-				next = t;
-		}
 	}
 
-	if (t)
-		asoc->peer.retran_path = t;
-	else
-		t = asoc->peer.retran_path;
+	if (trans_next != NULL)
+		asoc->peer.retran_path = trans_next;
 
-	pr_debug("%s: association:%p addr:%pISpc\n", __func__, asoc,
-		 &t->ipaddr.sa);
+	pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
+		 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
 }
 
-/* Choose the transport for sending retransmit packet. */
-struct sctp_transport *sctp_assoc_choose_alter_transport(
-	struct sctp_association *asoc, struct sctp_transport *last_sent_to)
+struct sctp_transport *
+sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
+				  struct sctp_transport *last_sent_to)
 {
 	/* If this is the first time packet is sent, use the active path,
 	 * else use the retran path. If the last packet was sent over the
 	 * retran path, update the retran path and use it.
 	 */
-	if (!last_sent_to)
+	if (last_sent_to == NULL) {
 		return asoc->peer.active_path;
-	else {
+	} else {
 		if (last_sent_to == asoc->peer.retran_path)
 			sctp_assoc_update_retran_path(asoc);
+
 		return asoc->peer.retran_path;
 	}
 }
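
The hunk above swaps the old flag-juggling round-robin walk for a score-based election. A minimal user-space sketch of that election follows; the enum values, priority table contents, and addresses are illustrative stand-ins for the kernel's definitions, not the real constants:

#include <stdio.h>

enum trans_state { T_INACTIVE, T_PF, T_UNKNOWN, T_ACTIVE, T_UNCONFIRMED };

/* Mirrors the shape of sctp_trans_state_to_prio_map[] above. */
static const unsigned char state_to_prio[] = {
	[T_ACTIVE]   = 3,	/* best case */
	[T_UNKNOWN]  = 2,
	[T_PF]       = 1,
	[T_INACTIVE] = 0,	/* worst case */
};

struct transport {
	enum trans_state state;
	const char *addr;
};

/* Same idea as sctp_trans_elect_best(): keep the higher-scored one. */
static const struct transport *elect_best(const struct transport *curr,
					  const struct transport *best)
{
	if (best == NULL)
		return curr;
	return state_to_prio[curr->state] > state_to_prio[best->state] ?
	       curr : best;
}

int main(void)
{
	/* Hypothetical multihomed peer: no ACTIVE path exists, so the
	 * walk settles on the best-scored remaining transport (UNKNOWN).
	 */
	const struct transport paths[] = {
		{ T_INACTIVE,    "192.0.2.1" },
		{ T_PF,          "192.0.2.2" },
		{ T_UNKNOWN,     "192.0.2.3" },
		{ T_UNCONFIRMED, "192.0.2.4" },
	};
	const struct transport *next = NULL;
	unsigned int i;

	for (i = 0; i < sizeof(paths) / sizeof(paths[0]); i++) {
		if (paths[i].state == T_UNCONFIRMED)
			continue;	/* never eligible for retran */
		next = elect_best(&paths[i], next);
		if (next->state == T_ACTIVE)
			break;		/* active is good enough */
	}

	printf("new retran path: %s\n", next ? next->addr : "(none)");
	return 0;
}

The kernel walks a circular list starting at retran_path's successor rather than a flat array, but the scoring and the early exit on the first ACTIVE transport are the same idea.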
@@ -1367,44 +1396,35 @@ static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
 	return false;
 }
 
-/* Increase asoc's rwnd by len and send any window update SACK if needed. */
-void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
+/* Update asoc's rwnd for the approximated state in the buffer,
+ * and check whether SACK needs to be sent.
+ */
+void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer)
 {
+	int rx_count;
 	struct sctp_chunk *sack;
 	struct timer_list *timer;
 
-	if (asoc->rwnd_over) {
-		if (asoc->rwnd_over >= len) {
-			asoc->rwnd_over -= len;
-		} else {
-			asoc->rwnd += (len - asoc->rwnd_over);
-			asoc->rwnd_over = 0;
-		}
-	} else {
-		asoc->rwnd += len;
-	}
+	if (asoc->ep->rcvbuf_policy)
+		rx_count = atomic_read(&asoc->rmem_alloc);
+	else
+		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
 
-	/* If we had window pressure, start recovering it
-	 * once our rwnd had reached the accumulated pressure
-	 * threshold.  The idea is to recover slowly, but up
-	 * to the initial advertised window.
-	 */
-	if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
-		int change = min(asoc->pathmtu, asoc->rwnd_press);
-		asoc->rwnd += change;
-		asoc->rwnd_press -= change;
-	}
+	if ((asoc->base.sk->sk_rcvbuf - rx_count) > 0)
+		asoc->rwnd = (asoc->base.sk->sk_rcvbuf - rx_count) >> 1;
+	else
+		asoc->rwnd = 0;
 
-	pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
-		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
-		 asoc->a_rwnd);
+	pr_debug("%s: asoc:%p rwnd=%u, rx_count=%d, sk_rcvbuf=%d\n",
+		 __func__, asoc, asoc->rwnd, rx_count,
+		 asoc->base.sk->sk_rcvbuf);
 
 	/* Send a window update SACK if the rwnd has increased by at least the
 	 * minimum of the association's PMTU and half of the receive buffer.
 	 * The algorithm used is similar to the one described in
 	 * Section 4.2.3.3 of RFC 1122.
 	 */
-	if (sctp_peer_needs_update(asoc)) {
+	if (update_peer && sctp_peer_needs_update(asoc)) {
 		asoc->a_rwnd = asoc->rwnd;
 
 		pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
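
The rewritten function derives the advertised window directly from receive-buffer occupancy instead of incrementally adding and subtracting per-skb lengths, which is what the old rwnd_over/rwnd_press bookkeeping approximated. A self-contained sketch of the arithmetic; the buffer sizes in main() are made-up illustrative values:

#include <stdio.h>

/* Advertise half of whatever is left in the receive buffer,
 * clamped at zero once the buffer is full or overcommitted.
 */
static unsigned int rwnd_from_buffer(int sk_rcvbuf, int rx_count)
{
	int free_space = sk_rcvbuf - rx_count;

	return free_space > 0 ? (unsigned int)free_space >> 1 : 0;
}

int main(void)
{
	/* e.g. a 212992-byte rcvbuf (a common Linux default) with
	 * 50000 bytes of undelivered data charged against it:
	 */
	printf("rwnd = %u\n", rwnd_from_buffer(212992, 50000));  /* 81496 */
	/* Overcommitted buffer: window closes entirely. */
	printf("rwnd = %u\n", rwnd_from_buffer(212992, 300000)); /* 0 */
	return 0;
}

Because the value is recomputed from scratch on every update, it can never drift out of sync with the real buffer state the way the old increase/decrease pair could.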
@@ -1426,45 +1446,6 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
 	}
 }
 
-/* Decrease asoc's rwnd by len. */
-void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
-{
-	int rx_count;
-	int over = 0;
-
-	if (unlikely(!asoc->rwnd || asoc->rwnd_over))
-		pr_debug("%s: association:%p has asoc->rwnd:%u, "
-			 "asoc->rwnd_over:%u!\n", __func__, asoc,
-			 asoc->rwnd, asoc->rwnd_over);
-
-	if (asoc->ep->rcvbuf_policy)
-		rx_count = atomic_read(&asoc->rmem_alloc);
-	else
-		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
-
-	/* If we've reached or overflowed our receive buffer, announce
-	 * a 0 rwnd if rwnd would still be positive.  Store the
-	 * the potential pressure overflow so that the window can be restored
-	 * back to original value.
-	 */
-	if (rx_count >= asoc->base.sk->sk_rcvbuf)
-		over = 1;
-
-	if (asoc->rwnd >= len) {
-		asoc->rwnd -= len;
-		if (over) {
-			asoc->rwnd_press += asoc->rwnd;
-			asoc->rwnd = 0;
-		}
-	} else {
-		asoc->rwnd_over = len - asoc->rwnd;
-		asoc->rwnd = 0;
-	}
-
-	pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
-		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
-		 asoc->rwnd_press);
-}
 
 /* Build the bind address list for the association based on info from the
  * local endpoint and the remote peer.
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 0f6259a6a932..2b1738ef9394 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -662,6 +662,8 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
 	 */
 	sctp_v6_to_sk_daddr(&asoc->peer.primary_addr, newsk);
 
+	newsk->sk_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
+
 	sk_refcnt_debug_inc(newsk);
 
 	if (newsk->sk_prot->init(newsk)) {
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 632090b961c3..3a1767ef3201 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1421,8 +1421,8 @@ static void sctp_chunk_destroy(struct sctp_chunk *chunk)
 	BUG_ON(!list_empty(&chunk->list));
 	list_del_init(&chunk->transmitted_list);
 
-	/* Free the chunk skb data and the SCTP_chunk stub itself. */
-	dev_kfree_skb(chunk->skb);
+	consume_skb(chunk->skb);
+	consume_skb(chunk->auth_chunk);
 
 	SCTP_DBG_OBJCNT_DEC(chunk);
 	kmem_cache_free(sctp_chunk_cachep, chunk);
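
consume_skb() differs from kfree_skb() only in tracing semantics (it records an intentional free rather than a drop), and like kfree_skb() it is a no-op on a NULL pointer. That NULL tolerance is what lets the destructor release the possibly-unset auth_chunk clone unconditionally. A user-space analogue of the same teardown pattern, relying on free()'s identical NULL guarantee:

#include <stdio.h>
#include <stdlib.h>

struct chunk {
	char *skb;		/* always allocated */
	char *auth_chunk;	/* only set when AUTH was bundled; may be NULL */
};

static void chunk_destroy(struct chunk *c)
{
	free(c->skb);
	free(c->auth_chunk);	/* free(NULL) is a defined no-op */
}

int main(void)
{
	struct chunk c = { .skb = malloc(16), .auth_chunk = NULL };

	chunk_destroy(&c);	/* no conditional needed for auth_chunk */
	puts("ok");
	return 0;
}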
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index bd859154000e..5d6883ff00c3 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -495,11 +495,12 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
 	}
 
 	/* If the transport error count is greater than the pf_retrans
-	 * threshold, and less than pathmaxrtx, then mark this transport
-	 * as Partially Failed, ee SCTP Quick Failover Draft, secon 5.1,
-	 * point 1
+	 * threshold, and less than pathmaxrtx, and if the current state
+	 * is not SCTP_UNCONFIRMED, then mark this transport as Partially
+	 * Failed, see SCTP Quick Failover Draft, section 5.1
 	 */
 	if ((transport->state != SCTP_PF) &&
+	    (transport->state != SCTP_UNCONFIRMED) &&
 	    (asoc->pf_retrans < transport->pathmaxrxt) &&
 	    (transport->error_count > asoc->pf_retrans)) {
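
The added condition keeps transports that were never confirmed from being flagged Partially Failed. A compact sketch of the full gate as a standalone predicate; the enum values and thresholds are illustrative stand-ins for the kernel's:

#include <stdbool.h>
#include <stdio.h>

enum state { ST_ACTIVE, ST_PF, ST_UNCONFIRMED };

/* Enter PF only if: not already PF, not UNCONFIRMED (the new check),
 * the PF threshold lies below pathmaxrxt, and the error count has
 * crossed that threshold.
 */
static bool should_enter_pf(enum state st, unsigned int error_count,
			    unsigned int pf_retrans, unsigned int pathmaxrxt)
{
	return st != ST_PF &&
	       st != ST_UNCONFIRMED &&
	       pf_retrans < pathmaxrxt &&
	       error_count > pf_retrans;
}

int main(void)
{
	/* An unconfirmed path no longer flips to PF, no matter how
	 * many timeouts it accumulates:
	 */
	printf("%d\n", should_enter_pf(ST_UNCONFIRMED, 3, 0, 5)); /* 0 */
	printf("%d\n", should_enter_pf(ST_ACTIVE, 3, 0, 5));      /* 1 */
	return 0;
}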
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 483dcd71b3c5..01e002430c85 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -758,6 +758,12 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
 		struct sctp_chunk auth;
 		sctp_ierror_t ret;
 
+		/* Make sure that we and the peer are AUTH capable */
+		if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
+			sctp_association_free(new_asoc);
+			return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+		}
+
 		/* set-up our fake chunk so that we can process it */
 		auth.skb = chunk->auth_chunk;
 		auth.asoc = chunk->asoc;
@@ -768,10 +774,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
 		auth.transport = chunk->transport;
 
 		ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
-
-		/* We can now safely free the auth_chunk clone */
-		kfree_skb(chunk->auth_chunk);
-
 		if (ret != SCTP_IERROR_NO_ERROR) {
 			sctp_association_free(new_asoc);
 			return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
@@ -6176,7 +6178,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	 * PMTU.  In cases, such as loopback, this might be a rather
 	 * large spill over.
 	 */
-	if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over ||
+	if ((!chunk->data_accepted) && (!asoc->rwnd ||
 	    (datalen > asoc->rwnd + asoc->frag_point))) {
 
 		/* If this is the next TSN, consider reneging to make
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9e91d6e5df63..981aaf8b6ace 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -64,6 +64,7 @@
 #include <linux/crypto.h>
 #include <linux/slab.h>
 #include <linux/file.h>
+#include <linux/compat.h>
 
 #include <net/ip.h>
 #include <net/icmp.h>
@@ -1368,11 +1369,19 @@ static int sctp_setsockopt_connectx(struct sock *sk,
 /*
  * New (hopefully final) interface for the API.
  * We use the sctp_getaddrs_old structure so that use-space library
- * can avoid any unnecessary allocations. The only defferent part
+ * can avoid any unnecessary allocations. The only different part
 * is that we store the actual length of the address buffer into the
 * addrs_num structure member. That way we can re-use the existing
 * code.
 */
+#ifdef CONFIG_COMPAT
+struct compat_sctp_getaddrs_old {
+	sctp_assoc_t	assoc_id;
+	s32		addr_num;
+	compat_uptr_t	addrs;		/* struct sockaddr * */
+};
+#endif
+
 static int sctp_getsockopt_connectx3(struct sock *sk, int len,
 				     char __user *optval,
 				     int __user *optlen)
@@ -1381,16 +1390,30 @@ static int sctp_getsockopt_connectx3(struct sock *sk, int len,
 	sctp_assoc_t assoc_id = 0;
 	int err = 0;
 
-	if (len < sizeof(param))
-		return -EINVAL;
+#ifdef CONFIG_COMPAT
+	if (is_compat_task()) {
+		struct compat_sctp_getaddrs_old param32;
 
-	if (copy_from_user(&param, optval, sizeof(param)))
-		return -EFAULT;
+		if (len < sizeof(param32))
+			return -EINVAL;
+		if (copy_from_user(&param32, optval, sizeof(param32)))
+			return -EFAULT;
 
-	err = __sctp_setsockopt_connectx(sk,
-			(struct sockaddr __user *)param.addrs,
-			param.addr_num, &assoc_id);
+		param.assoc_id = param32.assoc_id;
+		param.addr_num = param32.addr_num;
+		param.addrs = compat_ptr(param32.addrs);
+	} else
+#endif
+	{
+		if (len < sizeof(param))
+			return -EINVAL;
+		if (copy_from_user(&param, optval, sizeof(param)))
+			return -EFAULT;
+	}
 
+	err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
+					 param.addrs, param.addr_num,
+					 &assoc_id);
 	if (err == 0 || err == -EINPROGRESS) {
 		if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
 			return -EFAULT;
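
The compat path exists because struct sctp_getaddrs_old embeds a raw pointer, so its layout differs between 32-bit userspace and a 64-bit kernel: the same getsockopt buffer is 12 bytes in one world and 16 in the other. A user-space model of the mismatch and of the widening step that compat_ptr() performs in the hunk above; the types here are stand-ins for sctp_assoc_t and compat_uptr_t:

#include <stdint.h>
#include <stdio.h>

struct getaddrs_old {		/* native LP64 layout */
	int32_t assoc_id;
	int32_t addr_num;
	void *addrs;
};

struct compat_getaddrs_old {	/* what a 32-bit task actually passes */
	int32_t assoc_id;
	int32_t addr_num;
	uint32_t addrs;		/* 32-bit user pointer */
};

/* Read the 32-bit layout and widen the pointer member, the
 * compat_ptr() analogue, so the native code path can be reused.
 */
static void widen(struct getaddrs_old *dst,
		  const struct compat_getaddrs_old *src)
{
	dst->assoc_id = src->assoc_id;
	dst->addr_num = src->addr_num;
	dst->addrs = (void *)(uintptr_t)src->addrs;
}

int main(void)
{
	struct compat_getaddrs_old c32 = { 1, 2, 0x1000 };
	struct getaddrs_old p;

	widen(&p, &c32);
	/* 12 vs 16 on LP64: copying sizeof(native) from a 32-bit
	 * caller's buffer is exactly the bug being fixed.
	 */
	printf("compat: %zu bytes, native: %zu bytes\n",
	       sizeof(c32), sizeof(p));
	return 0;
}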
@@ -2092,12 +2115,6 @@ static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
 		sctp_skb_pull(skb, copied);
 		skb_queue_head(&sk->sk_receive_queue, skb);
 
-		/* When only partial message is copied to the user, increase
-		 * rwnd by that amount. If all the data in the skb is read,
-		 * rwnd is updated when the event is freed.
-		 */
-		if (!sctp_ulpevent_is_notification(event))
-			sctp_assoc_rwnd_increase(event->asoc, copied);
 		goto out;
 	} else if ((event->msg_flags & MSG_NOTIFICATION) ||
 		   (event->msg_flags & MSG_EOR))
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 7135e617ab0f..35c8923b5554 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -151,6 +151,7 @@ static struct ctl_table sctp_net_table[] = {
 	},
 	{
 		.procname	= "cookie_hmac_alg",
+		.data		= &init_net.sctp.sctp_hmac_alg,
 		.maxlen		= 8,
 		.mode		= 0644,
 		.proc_handler	= proc_sctp_do_hmac_alg,
@@ -401,15 +402,18 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
 
 int sctp_sysctl_net_register(struct net *net)
 {
-	struct ctl_table *table;
-	int i;
+	struct ctl_table *table = sctp_net_table;
+
+	if (!net_eq(net, &init_net)) {
+		int i;
 
-	table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
-	if (!table)
-		return -ENOMEM;
+		table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
+		if (!table)
+			return -ENOMEM;
 
-	for (i = 0; table[i].data; i++)
-		table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
+		for (i = 0; table[i].data; i++)
+			table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
+	}
 
 	net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table);
 	return 0;
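
After this change, init_net registers the template sctp_net_table directly (whose .data members now all point into init_net's state, including the newly filled-in cookie_hmac_alg entry), while every other namespace gets a kmemdup()'d copy whose .data pointers are rebased by the byte offset between its own sctp state and init_net's. A user-space model of that pointer rebasing; the structures are simplified stand-ins, and the cross-object pointer subtraction mirrors the kernel's own trick:

#include <stdio.h>
#include <string.h>

struct sctp_net { int rto_min; int rto_max; };
struct ctl_entry { const char *procname; void *data; };

int main(void)
{
	static struct sctp_net init_ns, other_ns;
	/* Template whose .data fields point into init_ns, plus a
	 * NULL sentinel; this stands in for sctp_net_table[].
	 */
	struct ctl_entry tmpl[] = {
		{ "rto_min", &init_ns.rto_min },
		{ "rto_max", &init_ns.rto_max },
		{ NULL, NULL },
	};
	struct ctl_entry table[3];
	int i;

	memcpy(table, tmpl, sizeof(table));	/* kmemdup() analogue */
	/* Shift each data pointer from init_ns into other_ns; the
	 * loop stops at the first NULL .data, which is why every
	 * real entry must have .data filled in.
	 */
	for (i = 0; table[i].data; i++)
		table[i].data = (char *)table[i].data +
				((char *)&other_ns - (char *)&init_ns);

	printf("rto_min rebased correctly: %d\n",
	       table[0].data == (void *)&other_ns.rto_min);	/* 1 */
	return 0;
}

Note the termination condition: because the walk stops at the first entry with NULL .data, a table entry that lacks a .data pointer (as cookie_hmac_alg did before this patch) silently cuts the rebasing loop short for everything after it.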
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 85c64658bd0b..8d198ae03606 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -989,7 +989,7 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
 	skb = sctp_event2skb(event);
 	/* Set the owner and charge rwnd for bytes received.  */
 	sctp_ulpevent_set_owner(event, asoc);
-	sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb));
+	sctp_assoc_rwnd_update(asoc, false);
 
 	if (!skb->data_len)
 		return;
@@ -1011,6 +1011,7 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
 {
 	struct sk_buff *skb, *frag;
 	unsigned int	len;
+	struct sctp_association *asoc;
 
 	/* Current stack structures assume that the rcv buffer is
 	 * per socket.  For UDP style sockets this is not true as
@@ -1035,8 +1036,11 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
 	}
 
 done:
-	sctp_assoc_rwnd_increase(event->asoc, len);
+	asoc = event->asoc;
+	sctp_association_hold(asoc);
 	sctp_ulpevent_release_owner(event);
+	sctp_assoc_rwnd_update(asoc, true);
+	sctp_association_put(asoc);
 }
 
 static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event)
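
The hold/put pair in the last hunk exists because sctp_ulpevent_release_owner() may drop what was the final reference to the association, after which the rwnd update would touch freed memory. A toy single-threaded model of that ordering, with a simplified refcount and no locking:

#include <stdio.h>

struct assoc { int refcnt; unsigned int rwnd; };

static void assoc_hold(struct assoc *a) { a->refcnt++; }

static void assoc_put(struct assoc *a)
{
	if (--a->refcnt == 0)
		printf("association freed\n");
}

static void rwnd_update(struct assoc *a) { a->rwnd = 4096; }

int main(void)
{
	struct assoc a = { .refcnt = 1 };

	assoc_hold(&a);		/* pin the asoc across release_owner */
	assoc_put(&a);		/* release_owner: may drop the last ref... */
	rwnd_update(&a);	/* ...but our hold keeps the object valid */
	assoc_put(&a);		/* now it can really go away */
	return 0;
}

Without the extra hold, the first put could be the final one, making the subsequent rwnd update a use-after-free; with it, destruction is deferred until after the update completes.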
