path: root/net/sctp/ulpqueue.c
author	YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>	2007-02-09 09:25:18 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2007-02-11 02:20:11 -0500
commit	d808ad9ab8b1109239027c248c4652503b9d3029 (patch)
tree	cdd09b4987a4efd687a0a138491d626f8b674de5 /net/sctp/ulpqueue.c
parent	10297b99315e5e08fe623ba56da35db1fee69ba9 (diff)
[NET] SCTP: Fix whitespace errors.
Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sctp/ulpqueue.c')
-rw-r--r--	net/sctp/ulpqueue.c	60
1 file changed, 30 insertions(+), 30 deletions(-)
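Because the commit is whitespace-only, the old and new columns of the listing below show identical visible text. A representative pair of the kind of line this commit fixes (a hypothetical reconstruction, since the rendering hides the whitespace difference) would be:

	-        queue = &sk->sk_receive_queue;	/* old: indented with spaces */
	+	queue = &sk->sk_receive_queue;	/* new: indented with a tab */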
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index e1d144275f97..f4759a9bdaee 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -191,7 +191,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
		queue = &sk->sk_receive_queue;
	} else if (ulpq->pd_mode) {
		if (event->msg_flags & MSG_NOTIFICATION)
			queue = &sctp_sk(sk)->pd_lobby;
		else {
			clear_pd = event->msg_flags & MSG_EOR;
			queue = &sk->sk_receive_queue;
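For orientation, the queue selection this hunk falls in reads roughly as below. A sketch only: the declarations and the leading branch sit above the hunk and are reconstructed from context, and any remaining branches are omitted.

	struct sk_buff_head *queue;
	int clear_pd = 0;

	if (!sctp_sk(sk)->pd_mode) {		/* assumed: not shown in the hunk */
		queue = &sk->sk_receive_queue;
	} else if (ulpq->pd_mode) {
		if (event->msg_flags & MSG_NOTIFICATION)
			queue = &sctp_sk(sk)->pd_lobby;	/* park notifications */
		else {
			/* a data event carrying MSG_EOR ends partial delivery */
			clear_pd = event->msg_flags & MSG_EOR;
			queue = &sk->sk_receive_queue;
		}
	}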
@@ -298,32 +298,32 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *qu
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list. We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue. */
	__skb_unlink(f_frag, queue);

	/* if we did unshare, then free the old skb and re-assign */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {

@@ -335,7 +335,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *qu

		/* Remove the fragment from the reassembly queue. */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment. */
		if (pos == l_frag)
			break;
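Condensed for reference, the delayed-free unshare logic that these reassembly hunks belong to can be read as the helper below. A sketch under assumptions: the function name attach_frag_list is hypothetical, and only the failure path visible in the diff is handled.

	/* Splice the fragment chain 'pos' onto the first fragment's
	 * frag_list. A cloned first fragment must be copied before its
	 * shared info is modified, and the original may only be freed
	 * after it is unlinked from the queue, which is why the code
	 * delays the free itself instead of calling skb_unshare(). */
	static struct sk_buff *attach_frag_list(struct sk_buff_head *queue,
						struct sk_buff *f_frag,
						struct sk_buff *pos)
	{
		struct sk_buff *new = NULL;

		if (skb_cloned(f_frag)) {
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */
			sctp_skb_set_owner_r(new, f_frag->sk);
			skb_shinfo(new)->frag_list = pos;
		} else {
			skb_shinfo(f_frag)->frag_list = pos;
		}

		__skb_unlink(f_frag, queue);

		/* if we did unshare, free the old skb and re-assign */
		if (new) {
			kfree_skb(f_frag);
			f_frag = new;
		}
		return f_frag;
	}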
@@ -624,7 +624,7 @@ static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
@@ -718,11 +718,11 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
		if (cssn != sctp_ssn_peek(in, csid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, csid);

		__skb_unlink(pos, &ulpq->lobby);
		if (!event) {
			/* Create a temporary list to collect chunks on. */
			event = sctp_skb2event(pos);
			__skb_queue_tail(&temp, sctp_event2skb(event));
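The hunk above sits in the loop that drains the ordering lobby once an SSN gap closes. Below is a minimal sketch of that loop using the SCTP-internal helpers visible in the diff; the declarations, including the inbound ssnmap pointer in, are assumptions reconstructed from context rather than verbatim kernel code.

	struct sctp_stream *in = &ulpq->asoc->ssnmap->in;	/* assumed */
	struct sctp_ulpevent *event = NULL, *cevent;
	struct sk_buff *pos, *tmp;
	struct sk_buff_head temp;

	__skb_queue_head_init(&temp);
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		if (cevent->ssn != sctp_ssn_peek(in, cevent->stream))
			break;			/* gap: stop reaping */

		/* Found the next expected SSN: mark it and collect it. */
		sctp_ssn_next(in, cevent->stream);
		__skb_unlink(pos, &ulpq->lobby);
		if (!event)			/* first skb anchors the list */
			event = sctp_skb2event(pos);
		__skb_queue_tail(&temp, pos);
	}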
@@ -755,7 +755,7 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
	sctp_ssn_skip(in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq);
	return;
@@ -849,7 +849,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
	if (chunk) {
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(sctp_data_chunk_t);
	} else
		needed = SCTP_DEFAULT_MAXWINDOW;

	freed = 0;
@@ -866,7 +866,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		tsn = ntohl(chunk->subh.data_hdr->tsn);
		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
		sctp_ulpq_tail_data(ulpq, chunk, gfp);

		sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
	}

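Taken together, the two renege hunks size and then satisfy a space request: free at least the incoming chunk's payload size from the out-of-order queues, then accept the chunk and attempt partial delivery. A compressed sketch of that flow; the types and the elided middle step are assumptions from context.

	__u16 needed;		/* bytes to reclaim */
	__u32 tsn;

	if (chunk) {
		/* wire length minus the DATA chunk header = payload size */
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(sctp_data_chunk_t);
	} else
		needed = SCTP_DEFAULT_MAXWINDOW;

	/* ... renege ordered/fragmented queues until 'needed' is freed ... */

	tsn = ntohl(chunk->subh.data_hdr->tsn);
	sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);	/* record the TSN */
	sctp_ulpq_tail_data(ulpq, chunk, gfp);		/* deliver the data */
	sctp_ulpq_partial_delivery(ulpq, chunk, gfp);	/* flush what we can */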