author		Jeff Dike <jdike@addtoit.com>	2007-10-16 04:27:32 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 12:43:08 -0400
commit		605c1e57690fddbd11347ec6788ff77c527994dd (patch)
tree		81a296133a6b748c0822c69416802c9429d455cc	/arch/um/drivers
parent		b53f35a8093e6aed7e8e880eaa0b89a3d2fdfb0a (diff)
uml: correctly handle skb allocation failures
Handle memory allocation failures when reading packets.  We have to read
something from the host, even if we can't allocate any memory.  If we don't,
the host side of the device may fill up and stop delivering interrupts
because no new packets can be queued.

A single sk_buff is allocated whenever an MTU is seen which is larger than
any seen earlier.  This is used to read packets if there is a memory
allocation failure.

The large MTU check is done from eth_configure, which is called when an
interface is added to the system.

Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/um/drivers')
-rw-r--r--	arch/um/drivers/net_kern.c	47
1 file changed, 47 insertions(+), 0 deletions(-)
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 59811cc880e0..8c01fa81a1ae 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -34,6 +34,46 @@ static inline void set_ether_mac(struct net_device *dev, unsigned char *addr)
 static DEFINE_SPINLOCK(opened_lock);
 static LIST_HEAD(opened);
 
+/*
+ * The drop_skb is used when we can't allocate an skb.  The
+ * packet is read into drop_skb in order to get the data off the
+ * connection to the host.
+ * It is reallocated whenever a maximum packet size is seen which is
+ * larger than any seen before.  update_drop_skb is called from
+ * eth_configure when a new interface is added.
+ */
+static DEFINE_SPINLOCK(drop_lock);
+static struct sk_buff *drop_skb;
+static int drop_max;
+
+static int update_drop_skb(int max)
+{
+	struct sk_buff *new;
+	unsigned long flags;
+	int err = 0;
+
+	spin_lock_irqsave(&drop_lock, flags);
+
+	if (max <= drop_max)
+		goto out;
+
+	err = -ENOMEM;
+	new = dev_alloc_skb(max);
+	if (new == NULL)
+		goto out;
+
+	skb_put(new, max);
+
+	kfree_skb(drop_skb);
+	drop_skb = new;
+	drop_max = max;
+	err = 0;
+out:
+	spin_unlock_irqrestore(&drop_lock, flags);
+
+	return err;
+}
+
 static int uml_net_rx(struct net_device *dev)
 {
 	struct uml_net_private *lp = dev->priv;
@@ -43,6 +83,9 @@ static int uml_net_rx(struct net_device *dev)
 	/* If we can't allocate memory, try again next round. */
 	skb = dev_alloc_skb(lp->max_packet);
 	if (skb == NULL) {
+		drop_skb->dev = dev;
+		/* Read a packet into drop_skb and don't do anything with it. */
+		(*lp->read)(lp->fd, drop_skb, lp);
 		lp->stats.rx_dropped++;
 		return 0;
 	}
@@ -447,6 +490,10 @@ static void eth_configure(int n, void *init, char *mac,
 	dev->watchdog_timeo = (HZ >> 1);
 	dev->irq = UM_ETH_IRQ;
 
+	err = update_drop_skb(lp->max_packet);
+	if (err)
+		goto out_undo_user_init;
+
 	rtnl_lock();
 	err = register_netdevice(dev);
 	rtnl_unlock();
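
Note: the core idea of the patch, always read the packet off the host connection even when you cannot keep it, is not specific to UML.  The sketch below is a hypothetical userspace analogue using plain POSIX sockets; the names (scratch_buf, rx_one, MAX_PKT) are illustrative and are not part of the UML driver above.  It only shows the pattern: a preallocated scratch buffer stands in for drop_skb so the peer's queue keeps draining on allocation failure.

/*
 * Hypothetical userspace analogue of the drop_skb pattern: if a buffer
 * for the incoming packet can't be allocated, still recv() the data into
 * a preallocated scratch buffer and discard it, so the sender is never
 * blocked waiting for us to drain the connection.
 */
#include <stdlib.h>
#include <sys/types.h>
#include <sys/socket.h>

#define MAX_PKT 1500			/* assumed maximum packet size */

static char scratch_buf[MAX_PKT];	/* always available, like drop_skb */

/* Receive one packet; returns a malloc'd buffer, or NULL if it was dropped. */
static void *rx_one(int fd, size_t *len)
{
	char *buf = malloc(MAX_PKT);
	ssize_t n;

	if (buf == NULL) {
		/* Can't keep this packet - read it anyway and throw it away. */
		recv(fd, scratch_buf, sizeof(scratch_buf), 0);
		*len = 0;
		return NULL;
	}

	n = recv(fd, buf, MAX_PKT, 0);
	if (n <= 0) {
		free(buf);
		*len = 0;
		return NULL;
	}

	*len = (size_t)n;
	return buf;
}

The kernel version differs mainly in that the scratch buffer (drop_skb) is grown lazily by update_drop_skb() whenever an interface with a larger MTU is configured, instead of being sized at compile time.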