author     Xin Long <lucien.xin@gmail.com>          2019-04-15 05:15:07 -0400
committer  David S. Miller <davem@davemloft.net>    2019-04-15 16:36:51 -0400
commit     9dde27de3e5efa0d032f3c891a0ca833a0d31911 (patch)
tree       baba7e7942d6e6a3162db024a2995f306d0afad4
parent     1033990ac5b2ab6cee93734cb6d301aa3a35bcaa (diff)
sctp: implement memory accounting on rx path
sk_forward_alloc is also updated on the rx path, but for consistency we
switch to using sk_mem_charge() in sctp_skb_set_owner_r().
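
For reference, sk_mem_charge() performs essentially the same decrement as the
open-coded line it replaces, but only for protocols that do memory accounting;
roughly, the generic helper in include/net/sock.h around this kernel looks like
this (paraphrased, not part of this patch):

static inline void sk_mem_charge(struct sock *sk, int size)
{
        if (!sk_has_account(sk))        /* no accounting for this protocol */
                return;
        sk->sk_forward_alloc -= size;   /* same effect as the old open-coded decrement */
}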
In sctp_eat_data(), checking sctp_memory_pressure alone is not enough, as it
does not cover the mem_cgroup_sockets_enabled case, so we switch to
sk_under_memory_pressure().
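
For reference, sk_under_memory_pressure() consults the memcg socket pressure
state in addition to the protocol's own flag (sctp_memory_pressure for SCTP),
which is why it also handles the mem_cgroup_sockets_enabled case; roughly
(paraphrased from include/net/sock.h, not part of this patch):

static inline bool sk_under_memory_pressure(const struct sock *sk)
{
        if (!sk->sk_prot->memory_pressure)
                return false;

        /* memcg-based socket accounting is checked first ... */
        if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
            mem_cgroup_under_socket_pressure(sk->sk_memcg))
                return true;

        /* ... then the per-protocol global pressure flag */
        return !!*sk->sk_prot->memory_pressure;
}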
When the socket is under memory pressure, sk_mem_reclaim() and
sk_rmem_schedule() should be called on both the RENEGE and the CHUNK DELIVERY
paths, so that the memory pressure state is exited as soon as possible.
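
For reference, sk_mem_reclaim() is what hands the socket's unused forward
allocation back to the protocol's global counter, allowing the pressure state
to clear; roughly (paraphrased from include/net/sock.h, not part of this patch):

static inline void sk_mem_reclaim(struct sock *sk)
{
        if (!sk_has_account(sk))
                return;
        if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
                /* return whole quanta to the protocol's memory_allocated counter */
                __sk_mem_reclaim(sk, sk->sk_forward_alloc);
}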
Note that sk_rmem_schedule() uses datalen there to keep things simple.
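
For reference, sk_rmem_schedule() simply checks whether 'size' bytes can be
charged against the socket's receive memory accounting; roughly (paraphrased
from include/net/sock.h, not part of this patch):

static inline bool
sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
{
        if (!sk_has_account(sk))
                return true;
        return size <= sk->sk_forward_alloc ||             /* fits in what is already charged */
               __sk_mem_schedule(sk, size, SK_MEM_RECV) || /* or try to charge more */
               skb_pfmemalloc(skb);                        /* or skb comes from pfmemalloc reserves */
}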
Reported-by: Matteo Croce <mcroce@redhat.com>
Tested-by: Matteo Croce <mcroce@redhat.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: Xin Long <lucien.xin@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--   include/net/sctp/sctp.h  |  2
-rw-r--r--   net/sctp/sm_statefuns.c  |  6
-rw-r--r--   net/sctp/ulpevent.c      | 19
-rw-r--r--   net/sctp/ulpqueue.c      |  3
4 files changed, 15 insertions, 15 deletions
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 1d13ec3f2707..eefdfa5abf6e 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -421,7 +421,7 @@ static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
         /*
          * This mimics the behavior of skb_set_owner_r
          */
-        sk->sk_forward_alloc -= event->rmem_len;
+        sk_mem_charge(sk, event->rmem_len);
 }
 
 /* Tests if the list has one and only one entry. */
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index c9ae3404b1bb..7dfc34b28f4f 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -6412,13 +6412,15 @@ static int sctp_eat_data(const struct sctp_association *asoc,
          * in sctp_ulpevent_make_rcvmsg will drop the frame if we grow our
          * memory usage too much
          */
-        if (*sk->sk_prot_creator->memory_pressure) {
+        if (sk_under_memory_pressure(sk)) {
                 if (sctp_tsnmap_has_gap(map) &&
                     (sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
                         pr_debug("%s: under pressure, reneging for tsn:%u\n",
                                  __func__, tsn);
                         deliver = SCTP_CMD_RENEGE;
-                }
+                } else {
+                        sk_mem_reclaim(sk);
+                }
         }
 
         /*
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 8cb7d9858270..c2a7478587ab 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -634,8 +634,9 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
                                                 gfp_t gfp)
 {
         struct sctp_ulpevent *event = NULL;
-        struct sk_buff *skb;
-        size_t padding, len;
+        struct sk_buff *skb = chunk->skb;
+        struct sock *sk = asoc->base.sk;
+        size_t padding, datalen;
         int rx_count;
 
         /*
@@ -646,15 +647,12 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
         if (asoc->ep->rcvbuf_policy)
                 rx_count = atomic_read(&asoc->rmem_alloc);
         else
-                rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
+                rx_count = atomic_read(&sk->sk_rmem_alloc);
 
-        if (rx_count >= asoc->base.sk->sk_rcvbuf) {
+        datalen = ntohs(chunk->chunk_hdr->length);
 
-                if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) ||
-                    (!sk_rmem_schedule(asoc->base.sk, chunk->skb,
-                                       chunk->skb->truesize)))
-                        goto fail;
-        }
+        if (rx_count >= sk->sk_rcvbuf || !sk_rmem_schedule(sk, skb, datalen))
+                goto fail;
 
         /* Clone the original skb, sharing the data. */
         skb = skb_clone(chunk->skb, gfp);
@@ -681,8 +679,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
          * The sender should never pad with more than 3 bytes. The receiver
          * MUST ignore the padding bytes.
          */
-        len = ntohs(chunk->chunk_hdr->length);
-        padding = SCTP_PAD4(len) - len;
+        padding = SCTP_PAD4(datalen) - datalen;
 
         /* Fixup cloned skb with just this chunks data. */
         skb_trim(skb, chunk->chunk_end - padding - skb->data);
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 7cdc3623fa35..a212fe079c07 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -1104,7 +1104,8 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                 freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
         }
         /* If able to free enough room, accept this chunk. */
-        if (freed >= needed) {
+        if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) &&
+            freed >= needed) {
                 int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
                 /*
                  * Enter partial delivery if chunk has not been