about summary refs log tree commit diff stats
path: root/arch/um/drivers/net_kern.c
diff options
context:
space:
mode:
author: Jeff Dike <jdike@addtoit.com> 2007-10-16 04:27:31 -0400
committer: Linus Torvalds <torvalds@woody.linux-foundation.org> 2007-10-16 12:43:08 -0400
commit b53f35a8093e6aed7e8e880eaa0b89a3d2fdfb0a (patch)
tree 50e19688753650e27b1f7fc1d48eb8683666e6b7 /arch/um/drivers/net_kern.c
parent cd1ae0e49bdd814cfaa2e5ab28cff21a30e20085 (diff)
uml: network driver MTU cleanups
A bunch of MTU-related cleanups in the network code.

First, there is the addition of the notion of a maximally-sized packet, which is the MTU plus headers. This is used to size the skb that will receive a packet. This allows ether_adjust_skb to go away, as it was used to resize the skb after it was allocated.

Since the skb passed into the low-level read routine is no longer resized, and possibly reallocated, there, they (and the write routines) don't need to get an sk_buff **. They just need the sk_buff * now. The callers of ether_adjust_skb still need to do the skb_put, so that's now inlined.

The MAX_PACKET definitions in most of the drivers are gone.

The set_mtu methods were all the same and did nothing, so they can be removed.

The ethertap driver had a typo which doubled the size of the packet rather than adding two bytes to it. It also wasn't defining its setup_size, causing a zero-byte kmalloc and crash when the invalid pointer returned from kmalloc was dereferenced.

Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/um/drivers/net_kern.c')
-rw-r--r--arch/um/drivers/net_kern.c42
1 file changed, 8 insertions(+), 34 deletions(-)
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index b097a24c1496..59811cc880e0 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -41,16 +41,16 @@ static int uml_net_rx(struct net_device *dev)
41 struct sk_buff *skb; 41 struct sk_buff *skb;
42 42
43 /* If we can't allocate memory, try again next round. */ 43 /* If we can't allocate memory, try again next round. */
44 skb = dev_alloc_skb(dev->mtu); 44 skb = dev_alloc_skb(lp->max_packet);
45 if (skb == NULL) { 45 if (skb == NULL) {
46 lp->stats.rx_dropped++; 46 lp->stats.rx_dropped++;
47 return 0; 47 return 0;
48 } 48 }
49 49
50 skb->dev = dev; 50 skb->dev = dev;
51 skb_put(skb, dev->mtu); 51 skb_put(skb, lp->max_packet);
52 skb_reset_mac_header(skb); 52 skb_reset_mac_header(skb);
53 pkt_len = (*lp->read)(lp->fd, &skb, lp); 53 pkt_len = (*lp->read)(lp->fd, skb, lp);
54 54
55 if (pkt_len > 0) { 55 if (pkt_len > 0) {
56 skb_trim(skb, pkt_len); 56 skb_trim(skb, pkt_len);
@@ -178,7 +178,7 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
178 178
179 spin_lock_irqsave(&lp->lock, flags); 179 spin_lock_irqsave(&lp->lock, flags);
180 180
181 len = (*lp->write)(lp->fd, &skb, lp); 181 len = (*lp->write)(lp->fd, skb, lp);
182 182
183 if (len == skb->len) { 183 if (len == skb->len) {
184 lp->stats.tx_packets++; 184 lp->stats.tx_packets++;
@@ -240,22 +240,9 @@ static int uml_net_set_mac(struct net_device *dev, void *addr)
240 240
241static int uml_net_change_mtu(struct net_device *dev, int new_mtu) 241static int uml_net_change_mtu(struct net_device *dev, int new_mtu)
242{ 242{
243 struct uml_net_private *lp = dev->priv;
244 int err = 0;
245
246 spin_lock_irq(&lp->lock);
247
248 new_mtu = (*lp->set_mtu)(new_mtu, &lp->user);
249 if (new_mtu < 0) {
250 err = new_mtu;
251 goto out;
252 }
253
254 dev->mtu = new_mtu; 243 dev->mtu = new_mtu;
255 244
256 out: 245 return 0;
257 spin_unlock_irq(&lp->lock);
258 return err;
259} 246}
260 247
261static void uml_net_get_drvinfo(struct net_device *dev, 248static void uml_net_get_drvinfo(struct net_device *dev,
@@ -427,6 +414,7 @@ static void eth_configure(int n, void *init, char *mac,
427 .dev = dev, 414 .dev = dev,
428 .fd = -1, 415 .fd = -1,
429 .mac = { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0}, 416 .mac = { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0},
417 .max_packet = transport->user->max_packet,
430 .protocol = transport->kern->protocol, 418 .protocol = transport->kern->protocol,
431 .open = transport->user->open, 419 .open = transport->user->open,
432 .close = transport->user->close, 420 .close = transport->user->close,
@@ -434,8 +422,7 @@ static void eth_configure(int n, void *init, char *mac,
434 .read = transport->kern->read, 422 .read = transport->kern->read,
435 .write = transport->kern->write, 423 .write = transport->kern->write,
436 .add_address = transport->user->add_address, 424 .add_address = transport->user->add_address,
437 .delete_address = transport->user->delete_address, 425 .delete_address = transport->user->delete_address });
438 .set_mtu = transport->user->set_mtu });
439 426
440 init_timer(&lp->tl); 427 init_timer(&lp->tl);
441 spin_lock_init(&lp->lock); 428 spin_lock_init(&lp->lock);
@@ -447,7 +434,7 @@ static void eth_configure(int n, void *init, char *mac,
447 goto out_unregister; 434 goto out_unregister;
448 435
449 set_ether_mac(dev, device->mac); 436 set_ether_mac(dev, device->mac);
450 dev->mtu = transport->user->max_packet; 437 dev->mtu = transport->user->mtu;
451 dev->open = uml_net_open; 438 dev->open = uml_net_open;
452 dev->hard_start_xmit = uml_net_start_xmit; 439 dev->hard_start_xmit = uml_net_start_xmit;
453 dev->stop = uml_net_close; 440 dev->stop = uml_net_close;
@@ -807,19 +794,6 @@ static void close_devices(void)
807 794
808__uml_exitcall(close_devices); 795__uml_exitcall(close_devices);
809 796
810struct sk_buff *ether_adjust_skb(struct sk_buff *skb, int extra)
811{
812 if ((skb != NULL) && (skb_tailroom(skb) < extra)) {
813 struct sk_buff *skb2;
814
815 skb2 = skb_copy_expand(skb, 0, extra, GFP_ATOMIC);
816 dev_kfree_skb(skb);
817 skb = skb2;
818 }
819 if (skb != NULL) skb_put(skb, extra);
820 return skb;
821}
822
823void iter_addresses(void *d, void (*cb)(unsigned char *, unsigned char *, 797void iter_addresses(void *d, void (*cb)(unsigned char *, unsigned char *,
824 void *), 798 void *),
825 void *arg) 799 void *arg)