author	David S. Miller <davem@davemloft.net>	2005-08-09 22:25:21 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2005-08-29 18:31:14 -0400
commit	8728b834b226ffcf2c94a58530090e292af2a7bf (patch)
tree	2fd51ff3b7097eb3ffc41ea3a1d8b3ba04715b4c /net/sctp/ulpqueue.c
parent	6869c4d8e066e21623c812c448a05f1ed931c9c6 (diff)
[NET]: Kill skb->list
Remove the "list" member of struct sk_buff, as it is entirely
redundant. All SKB list removal callers know which list the
SKB is on, so storing this in sk_buff does nothing other than
taking up some space.
Two tricky bits were SCTP, which I took care of, and two ATM
drivers which Francois Romieu <romieu@fr.zoreil.com> fixed
up.
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
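The caller-side pattern this change enforces can be sketched in plain C. The types and helpers below are simplified userspace stand-ins, not the real struct sk_buff, struct sk_buff_head or __skb_unlink(); the point is only that, once the back-pointer is gone, whoever unlinks a buffer must already know which queue it sits on.

#include <assert.h>
#include <stddef.h>

/* Simplified stand-ins for struct sk_buff / struct sk_buff_head. */
struct pkt {
	struct pkt *next, *prev;
	/* struct pkt_head *list;  -- the redundant member this commit removes */
	int data;
};

struct pkt_head {
	struct pkt *head, *tail;
	unsigned int qlen;
};

/* After the change, unlink always takes the owning queue from the caller;
 * the packet itself no longer records which queue it is on. */
static void pkt_unlink(struct pkt_head *q, struct pkt *p)
{
	if (p->prev)
		p->prev->next = p->next;
	else
		q->head = p->next;
	if (p->next)
		p->next->prev = p->prev;
	else
		q->tail = p->prev;
	p->next = p->prev = NULL;
	q->qlen--;
}

static void pkt_queue_tail(struct pkt_head *q, struct pkt *p)
{
	p->prev = q->tail;
	p->next = NULL;
	if (q->tail)
		q->tail->next = p;
	else
		q->head = p;
	q->tail = p;
	q->qlen++;
}

int main(void)
{
	struct pkt_head q = { NULL, NULL, 0 };
	struct pkt a = { .data = 1 }, b = { .data = 2 };

	pkt_queue_tail(&q, &a);
	pkt_queue_tail(&q, &b);
	pkt_unlink(&q, &a);	/* caller names the queue; no a.list lookup */
	assert(q.qlen == 1 && q.head == &b && b.prev == NULL);
	return 0;
}

In the kernel diff below the same idea shows up as passing &ulpq->reasm or &ulpq->lobby to __skb_unlink() instead of reading pos->list.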
Diffstat (limited to 'net/sctp/ulpqueue.c')
-rw-r--r--	net/sctp/ulpqueue.c	63
1 files changed, 38 insertions, 25 deletions
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 8bbc279d6c99..ec2c857eae7f 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -50,9 +50,9 @@
 
 /* Forward declarations for internal helpers. */
 static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					       struct sctp_ulpevent *);
 static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
					       struct sctp_ulpevent *);
 
 /* 1st Level Abstractions */
 
@@ -125,7 +125,9 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 		event = sctp_ulpq_order(ulpq, event);
 	}
 
-	/* Send event to the ULP. */
+	/* Send event to the ULP. 'event' is the sctp_ulpevent for
+	 * very first SKB on the 'temp' list.
+	 */
 	if (event)
 		sctp_ulpq_tail_event(ulpq, event);
 
@@ -158,14 +160,18 @@ static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
 	return sctp_clear_pd(ulpq->asoc->base.sk);
 }
 
-
-
+/* If the SKB of 'event' is on a list, it is the first such member
+ * of that list.
+ */
 int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 {
 	struct sock *sk = ulpq->asoc->base.sk;
-	struct sk_buff_head *queue;
+	struct sk_buff_head *queue, *skb_list;
+	struct sk_buff *skb = sctp_event2skb(event);
 	int clear_pd = 0;
 
+	skb_list = (struct sk_buff_head *) skb->prev;
+
 	/* If the socket is just going to throw this away, do not
 	 * even try to deliver it.
 	 */
@@ -197,10 +203,10 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 	/* If we are harvesting multiple skbs they will be
 	 * collected on a list.
 	 */
-	if (sctp_event2skb(event)->list)
-		sctp_skb_list_tail(sctp_event2skb(event)->list, queue);
+	if (skb_list)
+		sctp_skb_list_tail(skb_list, queue);
 	else
-		__skb_queue_tail(queue, sctp_event2skb(event));
+		__skb_queue_tail(queue, skb);
 
 	/* Did we just complete partial delivery and need to get
 	 * rolling again? Move pending data to the receive
@@ -214,10 +220,11 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 	return 1;
 
 out_free:
-	if (sctp_event2skb(event)->list)
-		sctp_queue_purge_ulpevents(sctp_event2skb(event)->list);
+	if (skb_list)
+		sctp_queue_purge_ulpevents(skb_list);
 	else
 		sctp_ulpevent_free(event);
+
 	return 0;
 }
 
@@ -269,7 +276,7 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
  * payload was fragmented on the way and ip had to reassemble them.
  * We add the rest of skb's to the first skb's fraglist.
  */
-static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag)
+static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
 {
 	struct sk_buff *pos;
 	struct sctp_ulpevent *event;
@@ -294,7 +301,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag,
 	skb_shinfo(f_frag)->frag_list = pos;
 
 	/* Remove the first fragment from the reassembly queue. */
-	__skb_unlink(f_frag, f_frag->list);
+	__skb_unlink(f_frag, queue);
 	while (pos) {
 
 		pnext = pos->next;
@@ -304,7 +311,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag,
 		f_frag->data_len += pos->len;
 
 		/* Remove the fragment from the reassembly queue. */
-		__skb_unlink(pos, pos->list);
+		__skb_unlink(pos, queue);
 
 		/* Break if we have reached the last fragment. */
 		if (pos == l_frag)
@@ -375,7 +382,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
 done:
 	return retval;
 found:
-	retval = sctp_make_reassembled_event(first_frag, pos);
+	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
 	if (retval)
 		retval->msg_flags |= MSG_EOR;
 	goto done;
@@ -435,7 +442,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq
 	 * further.
 	 */
done:
-	retval = sctp_make_reassembled_event(first_frag, last_frag);
+	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
 	if (retval && is_last)
 		retval->msg_flags |= MSG_EOR;
 
@@ -527,7 +534,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *u
 	 * further.
 	 */
done:
-	retval = sctp_make_reassembled_event(first_frag, last_frag);
+	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
 	return retval;
 }
 
@@ -537,6 +544,7 @@ done:
 static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
					      struct sctp_ulpevent *event)
 {
+	struct sk_buff_head *event_list;
 	struct sk_buff *pos, *tmp;
 	struct sctp_ulpevent *cevent;
 	struct sctp_stream *in;
@@ -547,6 +555,8 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
 	ssn = event->ssn;
 	in = &ulpq->asoc->ssnmap->in;
 
+	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
+
 	/* We are holding the chunks by stream, by SSN. */
 	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
 		cevent = (struct sctp_ulpevent *) pos->cb;
@@ -567,10 +577,10 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
 		/* Found it, so mark in the ssnmap. */
 		sctp_ssn_next(in, sid);
 
-		__skb_unlink(pos, pos->list);
+		__skb_unlink(pos, &ulpq->lobby);
 
 		/* Attach all gathered skbs to the event. */
-		__skb_queue_tail(sctp_event2skb(event)->list, pos);
+		__skb_queue_tail(event_list, pos);
 	}
 }
 
@@ -626,7 +636,7 @@ static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
 }
 
 static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
 {
 	__u16 sid, ssn;
 	struct sctp_stream *in;
@@ -667,7 +677,7 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
 {
 	struct sk_buff *pos, *tmp;
 	struct sctp_ulpevent *cevent;
-	struct sctp_ulpevent *event = NULL;
+	struct sctp_ulpevent *event;
 	struct sctp_stream *in;
 	struct sk_buff_head temp;
 	__u16 csid, cssn;
@@ -675,6 +685,8 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
 	in = &ulpq->asoc->ssnmap->in;
 
 	/* We are holding the chunks by stream, by SSN. */
+	skb_queue_head_init(&temp);
+	event = NULL;
 	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
 		cevent = (struct sctp_ulpevent *) pos->cb;
 		csid = cevent->stream;
@@ -686,19 +698,20 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
 		/* Found it, so mark in the ssnmap. */
 		sctp_ssn_next(in, csid);
 
-		__skb_unlink(pos, pos->list);
+		__skb_unlink(pos, &ulpq->lobby);
 		if (!event) {
 			/* Create a temporary list to collect chunks on. */
 			event = sctp_skb2event(pos);
-			skb_queue_head_init(&temp);
 			__skb_queue_tail(&temp, sctp_event2skb(event));
 		} else {
 			/* Attach all gathered skbs to the event. */
-			__skb_queue_tail(sctp_event2skb(event)->list, pos);
+			__skb_queue_tail(&temp, pos);
 		}
 	}
 
-	/* Send event to the ULP. */
+	/* Send event to the ULP. 'event' is the sctp_ulpevent for
+	 * very first SKB on the 'temp' list.
+	 */
 	if (event)
 		sctp_ulpq_tail_event(ulpq, event);
 }
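The SCTP-specific trick above, skb_list = (struct sk_buff_head *) skb->prev, leans on two long-standing properties of kernel skb queues: the queue is circular with the head acting as sentinel, and struct sk_buff_head begins with the same next/prev pointer pair as struct sk_buff, so the first queued skb's prev pointer aliases the queue head (hence the new comment "If the SKB of 'event' is on a list, it is the first such member of that list"). A minimal userspace sketch of that layout assumption follows; the types are simplified stand-ins, not the kernel's.

#include <assert.h>

/* Layouts mimic the kernel idiom: the head begins with the same two
 * pointers as a node and the list is circular, head as sentinel. */
struct node {
	struct node *next, *prev;
	int payload;
};

struct node_head {
	struct node *next, *prev;	/* must line up with struct node */
	unsigned int qlen;
};

static void queue_init(struct node_head *h)
{
	h->next = h->prev = (struct node *)h;	/* empty circular list */
	h->qlen = 0;
}

static void queue_tail(struct node_head *h, struct node *n)
{
	struct node *sentinel = (struct node *)h;

	n->next = sentinel;
	n->prev = h->prev;
	h->prev->next = n;	/* old tail (or the head itself) now points at n */
	h->prev = n;
	h->qlen++;
}

int main(void)
{
	struct node_head q;
	struct node first = { .payload = 1 };

	queue_init(&q);
	queue_tail(&q, &first);

	/* Code holding only the first node can recover the owning queue by
	 * casting its prev pointer, which is what the new SCTP code does
	 * with skb->prev once skb->list is gone. */
	assert((struct node_head *)first.prev == &q);
	return 0;
}

Where the code does not need that trick, the patch takes the simpler route of passing the queue explicitly, as with the new sk_buff_head argument to sctp_make_reassembled_event().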