author     David Miller <davem@davemloft.net>     2019-04-11 18:02:07 -0400
committer  David S. Miller <davem@davemloft.net>  2019-04-12 00:33:31 -0400
commit     013b96ec64616b57fc631b304dfcecc5bc288f90 (patch)
tree       0b4e504f19d268f8003d58a82b9cbbf249d36d1d /net/sctp/ulpqueue.c
parent     178ca044aa60cb05102148b635cb82f6986451a3 (diff)
sctp: Pass sk_buff_head explicitly to sctp_ulpq_tail_event().
Now the SKB list implementation assumption can be removed.
And now that we know that the list head is always non-NULL
we can remove the code blocks dealing with that as well.
Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
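
The calling convention this commit establishes is easiest to see in isolation. The sketch below is distilled from the hunks that follow; it is not a standalone, buildable unit and assumes the usual kernel definitions of struct sk_buff_head plus the SCTP helpers sctp_event2skb() and sctp_ulpq_tail_event():

	/* The caller now owns a local list head, collects the event's
	 * skb(s) on it, and passes the list itself.  Inside
	 * sctp_ulpq_tail_event() the list head is therefore always
	 * non-NULL, even when it carries a single skb.
	 */
	struct sk_buff_head temp;

	skb_queue_head_init(&temp);
	__skb_queue_tail(&temp, sctp_event2skb(event));
	sctp_ulpq_tail_event(ulpq, &temp);

Before this change the function took the event alone and recovered the list from skb->prev, an assumption about how SKB lists happen to be implemented.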
Diffstat (limited to 'net/sctp/ulpqueue.c')
-rw-r--r--  net/sctp/ulpqueue.c | 29
1 file changed, 11 insertions(+), 18 deletions(-)
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index a698f1a509bf..7cdc3623fa35 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -130,7 +130,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	 */
 	if (event) {
 		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
-		sctp_ulpq_tail_event(ulpq, event);
+		sctp_ulpq_tail_event(ulpq, &temp);
 	}
 
 	return event_eor;
@@ -194,18 +194,17 @@ static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
 	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
 }
 
-/* If the SKB of 'event' is on a list, it is the first such member
- * of that list.
- */
-int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
+int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sk_buff_head *skb_list)
 {
 	struct sock *sk = ulpq->asoc->base.sk;
 	struct sctp_sock *sp = sctp_sk(sk);
-	struct sk_buff_head *queue, *skb_list;
-	struct sk_buff *skb = sctp_event2skb(event);
+	struct sctp_ulpevent *event;
+	struct sk_buff_head *queue;
+	struct sk_buff *skb;
 	int clear_pd = 0;
 
-	skb_list = (struct sk_buff_head *) skb->prev;
+	skb = __skb_peek(skb_list);
+	event = sctp_skb2event(skb);
 
 	/* If the socket is just going to throw this away, do not
 	 * even try to deliver it.
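
Two helpers carry the new function body. As a reference sketch (modeled on include/linux/skbuff.h as of this commit; the in-tree definition is authoritative), __skb_peek() returns the first skb without unlinking it and without an empty-list check:

	/* No empty-list check: the caller must guarantee the list holds
	 * at least one skb, which the new calling convention does.
	 */
	static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
	{
		return list_->next;
	}

SCTP stores the ulpevent in the skb's control block, so sctp_skb2event() is essentially a cast of skb->cb; peeking the first skb of skb_list thus recovers the same event the old signature received directly.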
@@ -258,13 +257,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 		}
 	}
 
-	/* If we are harvesting multiple skbs they will be
-	 * collected on a list.
-	 */
-	if (skb_list)
-		skb_queue_splice_tail_init(skb_list, queue);
-	else
-		__skb_queue_tail(queue, skb);
+	skb_queue_splice_tail_init(skb_list, queue);
 
 	/* Did we just complete partial delivery and need to get
 	 * rolling again? Move pending data to the receive
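
The splice can now be unconditional because skb_queue_splice_tail_init() is a no-op on an empty source list and re-initializes the source afterwards. A sketch of its behavior (modeled on include/linux/skbuff.h; consult the tree for the exact definition):

	/* Move every skb from 'list' to the tail of 'head', then reset
	 * 'list' to empty.  A single-event caller simply splices a
	 * one-element list, so the old "list vs. lone skb" branch
	 * collapses into this one call.
	 */
	static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
						      struct sk_buff_head *head)
	{
		if (!skb_queue_empty(list)) {
			__skb_queue_splice(list, head->prev,
					   (struct sk_buff *)head);
			head->qlen += list->qlen;
			__skb_queue_head_init(list);
		}
	}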
@@ -757,7 +750,7 @@ static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
 		 * sctp_ulpevent for very first SKB on the temp' list.
 		 */
 		if (event)
-			sctp_ulpq_tail_event(ulpq, event);
+			sctp_ulpq_tail_event(ulpq, &temp);
 	}
 }
 
@@ -957,7 +950,7 @@ static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 	if (event) {
 		/* see if we have more ordered that we can deliver */
 		sctp_ulpq_retrieve_ordered(ulpq, event);
-		sctp_ulpq_tail_event(ulpq, event);
+		sctp_ulpq_tail_event(ulpq, &temp);
 	}
 }
 
@@ -1087,7 +1080,7 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 
 			skb_queue_head_init(&temp);
 			__skb_queue_tail(&temp, sctp_event2skb(event));
-			sctp_ulpq_tail_event(ulpq, event);
+			sctp_ulpq_tail_event(ulpq, &temp);
 			sctp_ulpq_set_pd(ulpq);
 			return;
 		}