Diffstat (limited to 'net/sctp/sm_statefuns.c')
-rw-r--r--	net/sctp/sm_statefuns.c	74
1 file changed, 18 insertions(+), 56 deletions(-)
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index a583d67cab63..ec0328b1cdb1 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -5428,10 +5428,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	sctp_verb_t deliver;
 	int tmp;
 	__u32 tsn;
-	int account_value;
 	struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
 	struct sock *sk = asoc->base.sk;
-	int rcvbuf_over = 0;
 
 	data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
 	skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
@@ -5441,48 +5439,6 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 
 	/* ASSERT: Now skb->data is really the user data. */
 
-	/*
-	 * If we are established, and we have used up our receive buffer
-	 * memory, think about droping the frame.
-	 * Note that we have an opportunity to improve performance here.
-	 * If we accept one chunk from an skbuff, we have to keep all the
-	 * memory of that skbuff around until the chunk is read into user
-	 * space. Therefore, once we accept 1 chunk we may as well accept all
-	 * remaining chunks in the skbuff. The data_accepted flag helps us do
-	 * that.
-	 */
-	if ((asoc->state == SCTP_STATE_ESTABLISHED) && (!chunk->data_accepted)) {
-		/*
-		 * If the receive buffer policy is 1, then each
-		 * association can allocate up to sk_rcvbuf bytes
-		 * otherwise, all the associations in aggregate
-		 * may allocate up to sk_rcvbuf bytes
-		 */
-		if (asoc->ep->rcvbuf_policy)
-			account_value = atomic_read(&asoc->rmem_alloc);
-		else
-			account_value = atomic_read(&sk->sk_rmem_alloc);
-		if (account_value > sk->sk_rcvbuf) {
-			/*
-			 * We need to make forward progress, even when we are
-			 * under memory pressure, so we always allow the
-			 * next tsn after the ctsn ack point to be accepted.
-			 * This lets us avoid deadlocks in which we have to
-			 * drop frames that would otherwise let us drain the
-			 * receive queue.
-			 */
-			if ((sctp_tsnmap_get_ctsn(map) + 1) != tsn)
-				return SCTP_IERROR_IGNORE_TSN;
-
-			/*
-			 * We're going to accept the frame but we should renege
-			 * to make space for it. This will send us down that
-			 * path later in this function.
-			 */
-			rcvbuf_over = 1;
-		}
-	}
-
 	/* Process ECN based congestion.
 	 *
 	 * Since the chunk structure is reused for all chunks within
@@ -5542,18 +5498,9 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	 * seems a bit troublesome in that frag_point varies based on
 	 * PMTU. In cases, such as loopback, this might be a rather
 	 * large spill over.
-	 * NOTE: If we have a full receive buffer here, we only renege if
-	 * our receiver can still make progress without the tsn being
-	 * received. We do this because in the event that the associations
-	 * receive queue is empty we are filling a leading gap, and since
-	 * reneging moves the gap to the end of the tsn stream, we are likely
-	 * to stall again very shortly. Avoiding the renege when we fill a
-	 * leading gap is a good heuristic for avoiding such steady state
-	 * stalls.
-	 */
-	if (!asoc->rwnd || asoc->rwnd_over ||
-	    (datalen > asoc->rwnd + asoc->frag_point) ||
-	    (rcvbuf_over && (!skb_queue_len(&sk->sk_receive_queue)))) {
+	 */
+	if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over ||
+	    (datalen > asoc->rwnd + asoc->frag_point))) {
 
 		/* If this is the next TSN, consider reneging to make
 		 * room. Note: Playing nice with a confused sender. A
@@ -5574,6 +5521,21 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	}
 
 	/*
+	 * Also try to renege to limit our memory usage in the event that
+	 * we are under memory pressure
+	 * If we can't renege, don't worry about it, the sk_stream_rmem_schedule
+	 * in sctp_ulpevent_make_rcvmsg will drop the frame if we grow our
+	 * memory usage too much
+	 */
+	if (*sk->sk_prot_creator->memory_pressure) {
+		if (sctp_tsnmap_has_gap(map) &&
+		    (sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
+			SCTP_DEBUG_PRINTK("Under Pressure! Reneging for tsn:%u\n", tsn);
+			deliver = SCTP_CMD_RENEGE;
+		}
+	}
+
+	/*
 	 * Section 3.3.10.9 No User Data (9)
 	 *
 	 * Cause of error
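
For reference, the accept/renege decision that sctp_eat_data() makes after this patch can be sketched as plain userspace C. This is only a minimal model, not kernel code: the struct, field, and function names below are simplified stand-ins for the kernel's sctp_association and sctp_tsnmap state, and the discard branch mirrors the unchanged code between the hunks shown above.

/* Minimal userspace sketch of the accept/renege decision after this patch.
 * All names here are simplified stand-ins, not the kernel's types. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum verdict { DELIVER, RENEGE, IGNORE_TSN };

struct assoc_state {
	uint32_t rwnd;		/* advertised receive window remaining */
	uint32_t rwnd_over;	/* bytes already accepted beyond rwnd */
	uint32_t frag_point;	/* allowed spill-over, derived from PMTU */
	uint32_t ctsn;		/* cumulative TSN ack point */
	bool tsnmap_has_gap;	/* out-of-order data already queued */
	bool data_accepted;	/* a chunk from this skb was already kept */
	bool memory_pressure;	/* protocol-wide memory pressure flag */
};

static enum verdict eat_data(const struct assoc_state *a,
			     uint32_t tsn, size_t datalen)
{
	enum verdict v = DELIVER;

	/* Window check: if the chunk does not fit, either renege for the
	 * TSN that fills the leading gap or drop it outright. */
	if (!a->data_accepted &&
	    (!a->rwnd || a->rwnd_over || datalen > a->rwnd + a->frag_point)) {
		if (a->tsnmap_has_gap && a->ctsn + 1 == tsn)
			v = RENEGE;		/* make room for in-order data */
		else
			return IGNORE_TSN;	/* drop this TSN for now */
	}

	/* Added by this patch: under memory pressure, also renege for the
	 * TSN that would advance the cumulative ack point. */
	if (a->memory_pressure && a->tsnmap_has_gap && a->ctsn + 1 == tsn)
		v = RENEGE;

	return v;
}

int main(void)
{
	struct assoc_state a = { .rwnd = 0, .ctsn = 41, .tsnmap_has_gap = true };

	/* TSN 42 fills the leading gap, so it is accepted via reneging. */
	printf("verdict for tsn 42: %d\n", eat_data(&a, 42, 100));
	return 0;
}

As the comment added in the last hunk notes, the old per-association/per-socket rcvbuf accounting (account_value / rcvbuf_over) is removed entirely; when reneging is not possible, dropping is left to the receive-buffer checks in sctp_ulpevent_make_rcvmsg.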