author	Eric Biederman <ebiederm@aristanetworks.com>	2009-03-04 02:36:04 -0500
committer	David S. Miller <davem@davemloft.net>	2009-03-04 02:36:04 -0500
commit	38d408152a86598a50680a82fe3353b506630409 (patch)
tree	a285e4883bdf04eb68201f379b52fa068cf4b2ed /drivers
parent	abb79972b4d1dff00f79cb0d123173abac48a6ae (diff)
veth: Allow setting the L3 MTU
The limitation to only 1500 byte MTUs limits the utility of the veth device for testing routing, so implement a configurable MTU.

For consistency I drop packets on the receive side when they are larger than the MTU, and I count those drops. I also allow a little padding for vlan headers.

In addition I test the MTU when a new device is created with netlink, because that path bypasses the existing MTU setting code.

Signed-off-by: Eric Biederman <ebiederm@aristanetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
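To make the arithmetic concrete, here is a minimal userspace sketch (not driver code) of the two checks this patch introduces. The constants mirror the ones added by the patch; frame_fits() is a hypothetical stand-in for the receive-side test in veth_xmit():

#include <assert.h>
#include <stdio.h>

#define ETH_HLEN 14			/* bytes in an Ethernet header */
#define MIN_MTU  68			/* Min L3 MTU */
#define MAX_MTU  65535			/* Max L3 MTU (arbitrary) */
#define MTU_PAD  (ETH_HLEN + 4)		/* header plus one 802.1Q VLAN tag */

/* Mirrors is_valid_veth_mtu() from the patch. */
static int is_valid_veth_mtu(int new_mtu)
{
	return (new_mtu >= MIN_MTU && new_mtu <= MAX_MTU);
}

/* Hypothetical helper: the skb->len test against rcv->mtu + MTU_PAD. */
static int frame_fits(int skb_len, int rcv_mtu)
{
	return skb_len <= rcv_mtu + MTU_PAD;
}

int main(void)
{
	assert(is_valid_veth_mtu(1500));
	assert(!is_valid_veth_mtu(65536));	/* above MAX_MTU: rejected */

	/* A VLAN-tagged full-size frame still fits: 1500 + 14 + 4 = 1518. */
	assert(frame_fits(1518, 1500));
	assert(!frame_fits(1519, 1500));	/* one byte too large: dropped */

	printf("all checks pass\n");
	return 0;
}

With the default 1500 byte MTU this accepts L2 frames up to 1518 bytes, exactly a full-size Ethernet frame carrying one VLAN tag, which is why the padding is ETH_HLEN + 4.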
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/veth.c	45
1 file changed, 39 insertions(+), 6 deletions(-)
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 124fe75b8a8a..015db1cece72 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -19,12 +19,17 @@
 #define DRV_NAME	"veth"
 #define DRV_VERSION	"1.0"
 
+#define MIN_MTU 68		/* Min L3 MTU */
+#define MAX_MTU 65535		/* Max L3 MTU (arbitrary) */
+#define MTU_PAD (ETH_HLEN + 4)	/* Max difference between L2 and L3 size MTU */
+
 struct veth_net_stats {
 	unsigned long	rx_packets;
 	unsigned long	tx_packets;
 	unsigned long	rx_bytes;
 	unsigned long	tx_bytes;
 	unsigned long	tx_dropped;
+	unsigned long	rx_dropped;
 };
 
 struct veth_priv {
@@ -147,7 +152,7 @@ static int veth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct net_device *rcv = NULL;
 	struct veth_priv *priv, *rcv_priv;
-	struct veth_net_stats *stats;
+	struct veth_net_stats *stats, *rcv_stats;
 	int length, cpu;
 
 	skb_orphan(skb);
@@ -158,9 +163,13 @@ static int veth_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	cpu = smp_processor_id();
 	stats = per_cpu_ptr(priv->stats, cpu);
+	rcv_stats = per_cpu_ptr(rcv_priv->stats, cpu);
 
 	if (!(rcv->flags & IFF_UP))
-		goto outf;
+		goto tx_drop;
+
+	if (skb->len > (rcv->mtu + MTU_PAD))
+		goto rx_drop;
 
 	skb->pkt_type = PACKET_HOST;
 	skb->protocol = eth_type_trans(skb, rcv);
@@ -178,17 +187,21 @@ static int veth_xmit(struct sk_buff *skb, struct net_device *dev)
 	stats->tx_bytes += length;
 	stats->tx_packets++;
 
-	stats = per_cpu_ptr(rcv_priv->stats, cpu);
-	stats->rx_bytes += length;
-	stats->rx_packets++;
+	rcv_stats->rx_bytes += length;
+	rcv_stats->rx_packets++;
 
 	netif_rx(skb);
 	return 0;
 
-outf:
+tx_drop:
 	kfree_skb(skb);
 	stats->tx_dropped++;
 	return 0;
+
+rx_drop:
+	kfree_skb(skb);
+	rcv_stats->rx_dropped++;
+	return 0;
 }
 
 /*
@@ -210,6 +223,7 @@ static struct net_device_stats *veth_get_stats(struct net_device *dev)
 	dev_stats->rx_bytes = 0;
 	dev_stats->tx_bytes = 0;
 	dev_stats->tx_dropped = 0;
+	dev_stats->rx_dropped = 0;
 
 	for_each_online_cpu(cpu) {
 		stats = per_cpu_ptr(priv->stats, cpu);
@@ -219,6 +233,7 @@ static struct net_device_stats *veth_get_stats(struct net_device *dev)
 		dev_stats->rx_bytes += stats->rx_bytes;
 		dev_stats->tx_bytes += stats->tx_bytes;
 		dev_stats->tx_dropped += stats->tx_dropped;
+		dev_stats->rx_dropped += stats->rx_dropped;
 	}
 
 	return dev_stats;
@@ -249,6 +264,19 @@ static int veth_close(struct net_device *dev)
 	return 0;
 }
 
+static int is_valid_veth_mtu(int new_mtu)
+{
+	return (new_mtu >= MIN_MTU && new_mtu <= MAX_MTU);
+}
+
+static int veth_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if (!is_valid_veth_mtu(new_mtu))
+		return -EINVAL;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
 static int veth_dev_init(struct net_device *dev)
 {
 	struct veth_net_stats *stats;
@@ -277,6 +305,7 @@ static const struct net_device_ops veth_netdev_ops = {
 	.ndo_open            = veth_open,
 	.ndo_stop            = veth_close,
 	.ndo_start_xmit      = veth_xmit,
+	.ndo_change_mtu      = veth_change_mtu,
 	.ndo_get_stats       = veth_get_stats,
 	.ndo_set_mac_address = eth_mac_addr,
 };
@@ -303,6 +332,10 @@ static int veth_validate(struct nlattr *tb[], struct nlattr *data[])
 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
 			return -EADDRNOTAVAIL;
 	}
+	if (tb[IFLA_MTU]) {
+		if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
+			return -EINVAL;
+	}
 	return 0;
 }
 
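One way to exercise the new ndo_change_mtu hook from userspace is the long-standing SIOCSIFMTU ioctl, which reaches the driver through dev_set_mtu(). A minimal sketch, assuming a veth device named "veth0" already exists; the interface name and the 9000 byte value are placeholders:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "veth0", IFNAMSIZ - 1);	/* placeholder device */
	ifr.ifr_mtu = 9000;				/* must fall in [MIN_MTU, MAX_MTU] */

	/* The kernel routes this to dev_set_mtu(), which invokes veth_change_mtu();
	 * an out-of-range value comes back as EINVAL. */
	if (ioctl(fd, SIOCSIFMTU, &ifr) < 0) {
		perror("SIOCSIFMTU");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}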