aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/tcp_input.c
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2005-08-09 22:25:21 -0400
committerDavid S. Miller <davem@sunset.davemloft.net>2005-08-29 18:31:14 -0400
commit8728b834b226ffcf2c94a58530090e292af2a7bf (patch)
tree2fd51ff3b7097eb3ffc41ea3a1d8b3ba04715b4c /net/ipv4/tcp_input.c
parent6869c4d8e066e21623c812c448a05f1ed931c9c6 (diff)
[NET]: Kill skb->list
Remove the "list" member of struct sk_buff, as it is entirely redundant. All SKB list removal callers know which list the SKB is on, so storing this in sk_buff does nothing other than taking up some space. Two tricky bits were SCTP, which I took care of, and two ATM drivers which Francois Romieu <romieu@fr.zoreil.com> fixed up. Signed-off-by: David S. Miller <davem@davemloft.net> Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--net/ipv4/tcp_input.c29
1 file changed, 16 insertions, 13 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 53a8a5399f1e..ffa24025cd02 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2085,7 +2085,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
2085 seq_rtt = now - scb->when; 2085 seq_rtt = now - scb->when;
2086 tcp_dec_pcount_approx(&tp->fackets_out, skb); 2086 tcp_dec_pcount_approx(&tp->fackets_out, skb);
2087 tcp_packets_out_dec(tp, skb); 2087 tcp_packets_out_dec(tp, skb);
2088 __skb_unlink(skb, skb->list); 2088 __skb_unlink(skb, &sk->sk_write_queue);
2089 sk_stream_free_skb(sk, skb); 2089 sk_stream_free_skb(sk, skb);
2090 } 2090 }
2091 2091
@@ -2853,7 +2853,7 @@ static void tcp_ofo_queue(struct sock *sk)
2853 2853
2854 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { 2854 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
2855 SOCK_DEBUG(sk, "ofo packet was already received \n"); 2855 SOCK_DEBUG(sk, "ofo packet was already received \n");
2856 __skb_unlink(skb, skb->list); 2856 __skb_unlink(skb, &tp->out_of_order_queue);
2857 __kfree_skb(skb); 2857 __kfree_skb(skb);
2858 continue; 2858 continue;
2859 } 2859 }
@@ -2861,7 +2861,7 @@ static void tcp_ofo_queue(struct sock *sk)
2861 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, 2861 tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
2862 TCP_SKB_CB(skb)->end_seq); 2862 TCP_SKB_CB(skb)->end_seq);
2863 2863
2864 __skb_unlink(skb, skb->list); 2864 __skb_unlink(skb, &tp->out_of_order_queue);
2865 __skb_queue_tail(&sk->sk_receive_queue, skb); 2865 __skb_queue_tail(&sk->sk_receive_queue, skb);
2866 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 2866 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
2867 if(skb->h.th->fin) 2867 if(skb->h.th->fin)
@@ -3027,7 +3027,7 @@ drop:
3027 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 3027 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
3028 3028
3029 if (seq == TCP_SKB_CB(skb1)->end_seq) { 3029 if (seq == TCP_SKB_CB(skb1)->end_seq) {
3030 __skb_append(skb1, skb); 3030 __skb_append(skb1, skb, &tp->out_of_order_queue);
3031 3031
3032 if (!tp->rx_opt.num_sacks || 3032 if (!tp->rx_opt.num_sacks ||
3033 tp->selective_acks[0].end_seq != seq) 3033 tp->selective_acks[0].end_seq != seq)
@@ -3071,7 +3071,7 @@ drop:
3071 tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, end_seq); 3071 tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, end_seq);
3072 break; 3072 break;
3073 } 3073 }
3074 __skb_unlink(skb1, skb1->list); 3074 __skb_unlink(skb1, &tp->out_of_order_queue);
3075 tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq); 3075 tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq);
3076 __kfree_skb(skb1); 3076 __kfree_skb(skb1);
3077 } 3077 }
@@ -3088,8 +3088,9 @@ add_sack:
3088 * simplifies code) 3088 * simplifies code)
3089 */ 3089 */
3090static void 3090static void
3091tcp_collapse(struct sock *sk, struct sk_buff *head, 3091tcp_collapse(struct sock *sk, struct sk_buff_head *list,
3092 struct sk_buff *tail, u32 start, u32 end) 3092 struct sk_buff *head, struct sk_buff *tail,
3093 u32 start, u32 end)
3093{ 3094{
3094 struct sk_buff *skb; 3095 struct sk_buff *skb;
3095 3096
@@ -3099,7 +3100,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
3099 /* No new bits? It is possible on ofo queue. */ 3100 /* No new bits? It is possible on ofo queue. */
3100 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 3101 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
3101 struct sk_buff *next = skb->next; 3102 struct sk_buff *next = skb->next;
3102 __skb_unlink(skb, skb->list); 3103 __skb_unlink(skb, list);
3103 __kfree_skb(skb); 3104 __kfree_skb(skb);
3104 NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED); 3105 NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
3105 skb = next; 3106 skb = next;
@@ -3145,7 +3146,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
3145 nskb->mac.raw = nskb->head + (skb->mac.raw-skb->head); 3146 nskb->mac.raw = nskb->head + (skb->mac.raw-skb->head);
3146 memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); 3147 memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
3147 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; 3148 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
3148 __skb_insert(nskb, skb->prev, skb, skb->list); 3149 __skb_insert(nskb, skb->prev, skb, list);
3149 sk_stream_set_owner_r(nskb, sk); 3150 sk_stream_set_owner_r(nskb, sk);
3150 3151
3151 /* Copy data, releasing collapsed skbs. */ 3152 /* Copy data, releasing collapsed skbs. */
@@ -3164,7 +3165,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
3164 } 3165 }
3165 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 3166 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
3166 struct sk_buff *next = skb->next; 3167 struct sk_buff *next = skb->next;
3167 __skb_unlink(skb, skb->list); 3168 __skb_unlink(skb, list);
3168 __kfree_skb(skb); 3169 __kfree_skb(skb);
3169 NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED); 3170 NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
3170 skb = next; 3171 skb = next;
@@ -3200,7 +3201,8 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
3200 if (skb == (struct sk_buff *)&tp->out_of_order_queue || 3201 if (skb == (struct sk_buff *)&tp->out_of_order_queue ||
3201 after(TCP_SKB_CB(skb)->seq, end) || 3202 after(TCP_SKB_CB(skb)->seq, end) ||
3202 before(TCP_SKB_CB(skb)->end_seq, start)) { 3203 before(TCP_SKB_CB(skb)->end_seq, start)) {
3203 tcp_collapse(sk, head, skb, start, end); 3204 tcp_collapse(sk, &tp->out_of_order_queue,
3205 head, skb, start, end);
3204 head = skb; 3206 head = skb;
3205 if (skb == (struct sk_buff *)&tp->out_of_order_queue) 3207 if (skb == (struct sk_buff *)&tp->out_of_order_queue)
3206 break; 3208 break;
@@ -3237,7 +3239,8 @@ static int tcp_prune_queue(struct sock *sk)
3237 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); 3239 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
3238 3240
3239 tcp_collapse_ofo_queue(sk); 3241 tcp_collapse_ofo_queue(sk);
3240 tcp_collapse(sk, sk->sk_receive_queue.next, 3242 tcp_collapse(sk, &sk->sk_receive_queue,
3243 sk->sk_receive_queue.next,
3241 (struct sk_buff*)&sk->sk_receive_queue, 3244 (struct sk_buff*)&sk->sk_receive_queue,
3242 tp->copied_seq, tp->rcv_nxt); 3245 tp->copied_seq, tp->rcv_nxt);
3243 sk_stream_mem_reclaim(sk); 3246 sk_stream_mem_reclaim(sk);
@@ -3462,7 +3465,7 @@ static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
3462 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 3465 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
3463 tp->copied_seq++; 3466 tp->copied_seq++;
3464 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { 3467 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
3465 __skb_unlink(skb, skb->list); 3468 __skb_unlink(skb, &sk->sk_receive_queue);
3466 __kfree_skb(skb); 3469 __kfree_skb(skb);
3467 } 3470 }
3468 } 3471 }