Diffstat (limited to 'drivers/net/loopback.c')
-rw-r--r--  drivers/net/loopback.c  67
 1 file changed, 0 insertions, 67 deletions
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 49f6bc036a92..3b43bfd85a0f 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -64,68 +64,6 @@ struct pcpu_lstats {
 	unsigned long bytes;
 };
 
-/* KISS: just allocate small chunks and copy bits.
- *
- * So, in fact, this is documentation, explaining what we expect
- * of largesending device modulo TCP checksum, which is ignored for loopback.
- */
-
-#ifdef LOOPBACK_TSO
-static void emulate_large_send_offload(struct sk_buff *skb)
-{
-	struct iphdr *iph = ip_hdr(skb);
-	struct tcphdr *th = (struct tcphdr *)(skb_network_header(skb) +
-					      (iph->ihl * 4));
-	unsigned int doffset = (iph->ihl + th->doff) * 4;
-	unsigned int mtu = skb_shinfo(skb)->gso_size + doffset;
-	unsigned int offset = 0;
-	u32 seq = ntohl(th->seq);
-	u16 id = ntohs(iph->id);
-
-	while (offset + doffset < skb->len) {
-		unsigned int frag_size = min(mtu, skb->len - offset) - doffset;
-		struct sk_buff *nskb = alloc_skb(mtu + 32, GFP_ATOMIC);
-
-		if (!nskb)
-			break;
-		skb_reserve(nskb, 32);
-		skb_set_mac_header(nskb, -ETH_HLEN);
-		skb_reset_network_header(nskb);
-		iph = ip_hdr(nskb);
-		skb_copy_to_linear_data(nskb, skb_network_header(skb),
-					doffset);
-		if (skb_copy_bits(skb,
-				  doffset + offset,
-				  nskb->data + doffset,
-				  frag_size))
-			BUG();
-		skb_put(nskb, doffset + frag_size);
-		nskb->ip_summed = CHECKSUM_UNNECESSARY;
-		nskb->dev = skb->dev;
-		nskb->priority = skb->priority;
-		nskb->protocol = skb->protocol;
-		nskb->dst = dst_clone(skb->dst);
-		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
-		nskb->pkt_type = skb->pkt_type;
-
-		th = (struct tcphdr *)(skb_network_header(nskb) + iph->ihl * 4);
-		iph->tot_len = htons(frag_size + doffset);
-		iph->id = htons(id);
-		iph->check = 0;
-		iph->check = ip_fast_csum((unsigned char *) iph, iph->ihl);
-		th->seq = htonl(seq);
-		if (offset + doffset + frag_size < skb->len)
-			th->fin = th->psh = 0;
-		netif_rx(nskb);
-		offset += frag_size;
-		seq += frag_size;
-		id++;
-	}
-
-	dev_kfree_skb(skb);
-}
-#endif /* LOOPBACK_TSO */
-
 /*
  * The higher levels take care of making this non-reentrant (it's
  * called with bh's disabled).
@@ -137,9 +75,6 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
 	skb_orphan(skb);
 
 	skb->protocol = eth_type_trans(skb,dev);
-#ifndef LOOPBACK_MUST_CHECKSUM
-	skb->ip_summed = CHECKSUM_UNNECESSARY;
-#endif
 
 #ifdef LOOPBACK_TSO
 	if (skb_is_gso(skb)) {
@@ -234,9 +169,7 @@ static void loopback_setup(struct net_device *dev)
 	dev->type		= ARPHRD_LOOPBACK;	/* 0x0001*/
 	dev->flags		= IFF_LOOPBACK;
 	dev->features		= NETIF_F_SG | NETIF_F_FRAGLIST
-#ifdef LOOPBACK_TSO
 			  | NETIF_F_TSO
-#endif
 			  | NETIF_F_NO_CSUM
 			  | NETIF_F_HIGHDMA
 			  | NETIF_F_LLTX