Diffstat (limited to 'net/sctp/associola.c')
 -rw-r--r--  net/sctp/associola.c  82
 1 file changed, 65 insertions, 17 deletions
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 4f6d6f9d1274..39579c3e0d14 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1395,35 +1395,44 @@ static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
 	return false;
 }
 
-/* Update asoc's rwnd for the approximated state in the buffer,
- * and check whether SACK needs to be sent.
- */
-void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer)
+/* Increase asoc's rwnd by len and send any window update SACK if needed. */
+void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
 {
-	int rx_count;
 	struct sctp_chunk *sack;
 	struct timer_list *timer;
 
-	if (asoc->ep->rcvbuf_policy)
-		rx_count = atomic_read(&asoc->rmem_alloc);
-	else
-		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
+	if (asoc->rwnd_over) {
+		if (asoc->rwnd_over >= len) {
+			asoc->rwnd_over -= len;
+		} else {
+			asoc->rwnd += (len - asoc->rwnd_over);
+			asoc->rwnd_over = 0;
+		}
+	} else {
+		asoc->rwnd += len;
+	}
 
-	if ((asoc->base.sk->sk_rcvbuf - rx_count) > 0)
-		asoc->rwnd = (asoc->base.sk->sk_rcvbuf - rx_count) >> 1;
-	else
-		asoc->rwnd = 0;
+	/* If we had window pressure, start recovering it
+	 * once our rwnd had reached the accumulated pressure
+	 * threshold.  The idea is to recover slowly, but up
+	 * to the initial advertised window.
+	 */
+	if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
+		int change = min(asoc->pathmtu, asoc->rwnd_press);
+		asoc->rwnd += change;
+		asoc->rwnd_press -= change;
+	}
 
-	pr_debug("%s: asoc:%p rwnd=%u, rx_count=%d, sk_rcvbuf=%d\n",
-		 __func__, asoc, asoc->rwnd, rx_count,
-		 asoc->base.sk->sk_rcvbuf);
+	pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
+		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
+		 asoc->a_rwnd);
 
 	/* Send a window update SACK if the rwnd has increased by at least the
 	 * minimum of the association's PMTU and half of the receive buffer.
 	 * The algorithm used is similar to the one described in
 	 * Section 4.2.3.3 of RFC 1122.
 	 */
-	if (update_peer && sctp_peer_needs_update(asoc)) {
+	if (sctp_peer_needs_update(asoc)) {
 		asoc->a_rwnd = asoc->rwnd;
 
 		pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
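
The restored increase path above keeps three counters per association: rwnd (the window that can still be advertised), rwnd_over (bytes accepted beyond the advertised window) and rwnd_press (window withheld while the receive buffer was under pressure). As a reading aid only, here is a minimal userspace sketch of that bookkeeping; struct toy_assoc, umin() and toy_rwnd_increase() are hypothetical stand-ins, not the kernel's struct sctp_association, min() or sctp_assoc_rwnd_increase() themselves.

/* Toy model of the rwnd bookkeeping in the increase path above.
 * Hypothetical stand-in types and helpers, not the kernel structures.
 */
#include <stdio.h>

struct toy_assoc {
	unsigned int rwnd;       /* window we can still advertise */
	unsigned int rwnd_over;  /* bytes accepted beyond the window */
	unsigned int rwnd_press; /* window withheld under buffer pressure */
	unsigned int pathmtu;    /* recovery step size */
};

static unsigned int umin(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static void toy_rwnd_increase(struct toy_assoc *a, unsigned int len)
{
	/* First repay any overdraft, then grow the window. */
	if (a->rwnd_over) {
		if (a->rwnd_over >= len) {
			a->rwnd_over -= len;
		} else {
			a->rwnd += len - a->rwnd_over;
			a->rwnd_over = 0;
		}
	} else {
		a->rwnd += len;
	}

	/* Recover pressured window slowly, one PMTU-sized step per call,
	 * and only once rwnd has climbed back to the pressure threshold.
	 */
	if (a->rwnd_press && a->rwnd >= a->rwnd_press) {
		unsigned int change = umin(a->pathmtu, a->rwnd_press);
		a->rwnd += change;
		a->rwnd_press -= change;
	}
}

int main(void)
{
	struct toy_assoc a = { .rwnd = 2000, .rwnd_over = 500,
			       .rwnd_press = 3000, .pathmtu = 1500 };

	toy_rwnd_increase(&a, 1200);  /* repays the 500-byte overdraft, adds 700 */
	printf("rwnd=%u over=%u press=%u\n", a.rwnd, a.rwnd_over, a.rwnd_press);

	toy_rwnd_increase(&a, 1000);  /* rwnd hits 3700 >= press, recovers one MTU */
	printf("rwnd=%u over=%u press=%u\n", a.rwnd, a.rwnd_over, a.rwnd_press);
	return 0;
}

Built with a plain C compiler, the two calls show the overdraft being repaid before the window grows, and the pressured window coming back one PMTU-sized step at a time rather than all at once.
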
@@ -1445,6 +1454,45 @@ void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer)
 	}
 }
 
+/* Decrease asoc's rwnd by len. */
+void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
+{
+	int rx_count;
+	int over = 0;
+
+	if (unlikely(!asoc->rwnd || asoc->rwnd_over))
+		pr_debug("%s: association:%p has asoc->rwnd:%u, "
+			 "asoc->rwnd_over:%u!\n", __func__, asoc,
+			 asoc->rwnd, asoc->rwnd_over);
+
+	if (asoc->ep->rcvbuf_policy)
+		rx_count = atomic_read(&asoc->rmem_alloc);
+	else
+		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
+
+	/* If we've reached or overflowed our receive buffer, announce
+	 * a 0 rwnd if rwnd would still be positive.  Store the
+	 * potential pressure overflow so that the window can be restored
+	 * back to its original value.
+	 */
+	if (rx_count >= asoc->base.sk->sk_rcvbuf)
+		over = 1;
+
+	if (asoc->rwnd >= len) {
+		asoc->rwnd -= len;
+		if (over) {
+			asoc->rwnd_press += asoc->rwnd;
+			asoc->rwnd = 0;
+		}
+	} else {
+		asoc->rwnd_over = len - asoc->rwnd;
+		asoc->rwnd = 0;
+	}
+
+	pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
+		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
+		 asoc->rwnd_press);
+}
 
 /* Build the bind address list for the association based on info from the
  * local endpoint and the remote peer.
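
On the decrease side, the interesting arithmetic is how the two auxiliary counters are fed: when the socket receive buffer is already full, whatever window remains is parked in rwnd_press so it can be recovered later, and when the incoming data exceeds the current window the excess is recorded as rwnd_over. The sketch below models just that split under stated assumptions; the buffer_full flag stands in for the kernel's rx_count >= sk_rcvbuf check, and struct toy_assoc is the same hypothetical toy type as in the earlier sketch, not struct sctp_association.

/* Toy model of the rwnd bookkeeping in the decrease path above.
 * buffer_full replaces the kernel's rmem accounting; hypothetical types only.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_assoc {
	unsigned int rwnd;       /* window we can still advertise */
	unsigned int rwnd_over;  /* bytes accepted beyond the window */
	unsigned int rwnd_press; /* window withheld under buffer pressure */
};

static void toy_rwnd_decrease(struct toy_assoc *a, unsigned int len,
			      bool buffer_full)
{
	if (a->rwnd >= len) {
		a->rwnd -= len;
		if (buffer_full) {
			/* Receive buffer exhausted: advertise 0 and park the
			 * remaining window as pressure to recover later.
			 */
			a->rwnd_press += a->rwnd;
			a->rwnd = 0;
		}
	} else {
		/* Data larger than the window: record the overdraft. */
		a->rwnd_over = len - a->rwnd;
		a->rwnd = 0;
	}
}

int main(void)
{
	struct toy_assoc a = { .rwnd = 4000, .rwnd_over = 0, .rwnd_press = 0 };

	toy_rwnd_decrease(&a, 1000, true);   /* buffer full: remaining 3000 moves to press */
	printf("rwnd=%u over=%u press=%u\n", a.rwnd, a.rwnd_over, a.rwnd_press);

	toy_rwnd_decrease(&a, 500, false);   /* rwnd already 0: 500 bytes of overdraft */
	printf("rwnd=%u over=%u press=%u\n", a.rwnd, a.rwnd_over, a.rwnd_press);
	return 0;
}

Read together with the first sketch, this shows the round trip: decrease parks window in rwnd_press or rwnd_over, and increase pays those counters back before the association advertises a larger window again.
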
