author     David S. Miller <davem@davemloft.net>    2017-12-22 11:16:31 -0500
committer  David S. Miller <davem@davemloft.net>    2017-12-22 11:16:31 -0500
commit     fba961ab29e5ffb055592442808bb0f7962e05da (patch)
tree       5180c384b79399c469e0ed88211114e6ab249484  /net/sctp/ulpqueue.c
parent     0a80f0c26bf5a131892b91db5318eb67608006d2 (diff)
parent     ead68f216110170ec729e2c4dec0aad6d38259d7 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Lots of overlapping changes. Also on the net-next side the XDP state
management is handled more in the generic layers so undo the 'net' nfp
fix which isn't applicable in net-next.

Include a necessary change by Jakub Kicinski, with log message:

====================
cls_bpf no longer takes care of offload tracking. Make sure netdevsim
performs necessary checks. This fixes a warning caused by TC trying to
remove a filter it has not added.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
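As an illustration of the offload-tracking point in the log above, here is a
minimal standalone sketch (not the actual cls_bpf or netdevsim code; every
name in it is hypothetical). The idea is that once the classifier no longer
tracks offloaded filters on a driver's behalf, the driver must remember which
filters it accepted and quietly ignore destroy requests for filters it never
offloaded, which is what avoids the warning mentioned in the log.

/*
 * Illustrative sketch only -- hypothetical names, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_filter {
	unsigned long cookie;           /* identifies the TC filter */
	struct fake_filter *next;
};

struct fake_dev {
	struct fake_filter *offloaded;  /* filters this device accepted */
};

/* Record a filter as offloaded when the add callback succeeds. */
static int fake_filter_add(struct fake_dev *dev, unsigned long cookie)
{
	struct fake_filter *f = malloc(sizeof(*f));

	if (!f)
		return -1;
	f->cookie = cookie;
	f->next = dev->offloaded;
	dev->offloaded = f;
	return 0;
}

/* Destroy callback: only act on filters we actually offloaded. */
static void fake_filter_destroy(struct fake_dev *dev, unsigned long cookie)
{
	struct fake_filter **p = &dev->offloaded;
	struct fake_filter *victim;

	while (*p && (*p)->cookie != cookie)
		p = &(*p)->next;
	if (!*p)
		return;                 /* unknown filter: no-op, no warning */
	victim = *p;
	*p = victim->next;
	free(victim);
}

int main(void)
{
	struct fake_dev dev = { .offloaded = NULL };

	fake_filter_add(&dev, 0x1);
	fake_filter_destroy(&dev, 0x2); /* never added: silently ignored */
	fake_filter_destroy(&dev, 0x1); /* known filter: removed */
	printf("remaining: %s\n", dev.offloaded ? "some" : "none");
	return 0;
}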
Diffstat (limited to 'net/sctp/ulpqueue.c')
-rw-r--r--  net/sctp/ulpqueue.c | 24
1 file changed, 8 insertions(+), 16 deletions(-)
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 97fae53310e0..0b427100b0d4 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -1093,29 +1093,21 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 		      gfp_t gfp)
 {
-	struct sctp_association *asoc;
-	__u16 needed, freed;
-
-	asoc = ulpq->asoc;
+	struct sctp_association *asoc = ulpq->asoc;
+	__u32 freed = 0;
+	__u16 needed;
 
-	if (chunk) {
-		needed = ntohs(chunk->chunk_hdr->length);
-		needed -= sizeof(struct sctp_data_chunk);
-	} else
-		needed = SCTP_DEFAULT_MAXWINDOW;
-
-	freed = 0;
+	needed = ntohs(chunk->chunk_hdr->length) -
+		 sizeof(struct sctp_data_chunk);
 
 	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
 		freed = sctp_ulpq_renege_order(ulpq, needed);
-		if (freed < needed) {
+		if (freed < needed)
 			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
-		}
 	}
 	/* If able to free enough room, accept this chunk. */
-	if (chunk && (freed >= needed)) {
-		int retval;
-		retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
+	if (freed >= needed) {
+		int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
 		/*
 		 * Enter partial delivery if chunk has not been
 		 * delivered; otherwise, drain the reassembly queue.
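For readers skimming the hunk above, the following is a compile-standalone
sketch of the control flow sctp_ulpq_renege() ends up with after this change:
compute how much receive-buffer room the incoming DATA chunk needs, reclaim
space first from the ordering queue and then from the reassembly (fragment)
queue, and accept the chunk only if enough room was freed. The helper
functions here are hypothetical stand-ins, not the kernel's
sctp_ulpq_renege_order()/sctp_ulpq_renege_frags()/sctp_ulpq_tail_data().

/* Standalone sketch; all names are hypothetical stand-ins. */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Stubs playing the role of the ulpq helpers seen in the diff above. */
static bool receive_queue_empty(void)          { return true; }
static uint32_t renege_order_queue(uint16_t n) { return n / 2; } /* pretend half was freed */
static uint32_t renege_frag_queue(uint16_t n)  { return n; }     /* pretend the rest was freed */
static int tail_data(const void *chunk)        { (void)chunk; return 0; }

/* Mirrors the post-change control flow of sctp_ulpq_renege(). */
static int renege_sketch(const void *chunk, uint16_t needed)
{
	uint32_t freed = 0;

	/* Only try to reclaim room when nothing is queued for the app. */
	if (receive_queue_empty()) {
		freed = renege_order_queue(needed);
		if (freed < needed)
			freed += renege_frag_queue(needed - freed);
	}

	/* Accept the chunk only if enough room was reclaimed. */
	if (freed >= needed)
		return tail_data(chunk);
	return -1;
}

int main(void)
{
	int dummy_chunk = 0;

	printf("accepted: %d\n", renege_sketch(&dummy_chunk, 1000) == 0);
	return 0;
}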