Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r--	drivers/net/xen-netfront.c | 62
1 file changed, 35 insertions(+), 27 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 698b905058dd..b16175032327 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -489,6 +489,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	int frags = skb_shinfo(skb)->nr_frags;
 	unsigned int offset = offset_in_page(data);
 	unsigned int len = skb_headlen(skb);
+	unsigned long flags;
 
 	frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
 	if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
@@ -498,12 +499,12 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto drop;
 	}
 
-	spin_lock_irq(&np->tx_lock);
+	spin_lock_irqsave(&np->tx_lock, flags);
 
 	if (unlikely(!netif_carrier_ok(dev) ||
 		     (frags > 1 && !xennet_can_sg(dev)) ||
 		     netif_needs_gso(skb, netif_skb_features(skb)))) {
-		spin_unlock_irq(&np->tx_lock);
+		spin_unlock_irqrestore(&np->tx_lock, flags);
 		goto drop;
 	}
 
@@ -574,7 +575,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (!netfront_tx_slot_available(np))
 		netif_stop_queue(dev);
 
-	spin_unlock_irq(&np->tx_lock);
+	spin_unlock_irqrestore(&np->tx_lock, flags);
 
 	return NETDEV_TX_OK;
 
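The three hunks above switch np->tx_lock in xennet_start_xmit() from spin_lock_irq()/spin_unlock_irq() to spin_lock_irqsave()/spin_unlock_irqrestore(). Presumably this is because the netpoll hook added further down can enter the transmit path with interrupts already disabled: spin_unlock_irq() re-enables interrupts unconditionally, whereas the irqrestore variant puts the caller's IRQ state back exactly as it was. A minimal standalone sketch of the difference (illustrative names only, not the driver's code):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* stand-in for np->tx_lock */

/* Safe from any context: the caller's IRQ state is saved and restored. */
static void example_xmit_path_irqsave(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... touch state shared with the interrupt handler ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

/*
 * Only correct when interrupts are known to be enabled on entry:
 * spin_unlock_irq() re-enables them unconditionally, which breaks a
 * caller (e.g. a netpoll transmit) that entered with IRQs disabled.
 */
static void example_xmit_path_irq(void)
{
	spin_lock_irq(&example_lock);
	/* ... */
	spin_unlock_irq(&example_lock);
}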
@@ -1228,6 +1229,33 @@ static int xennet_set_features(struct net_device *dev,
 	return 0;
 }
 
+static irqreturn_t xennet_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct netfront_info *np = netdev_priv(dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&np->tx_lock, flags);
+
+	if (likely(netif_carrier_ok(dev))) {
+		xennet_tx_buf_gc(dev);
+		/* Under tx_lock: protects access to rx shared-ring indexes. */
+		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
+			napi_schedule(&np->napi);
+	}
+
+	spin_unlock_irqrestore(&np->tx_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void xennet_poll_controller(struct net_device *dev)
+{
+	xennet_interrupt(0, dev);
+}
+#endif
+
 static const struct net_device_ops xennet_netdev_ops = {
 	.ndo_open            = xennet_open,
 	.ndo_uninit          = xennet_uninit,
@@ -1239,6 +1267,9 @@ static const struct net_device_ops xennet_netdev_ops = {
 	.ndo_validate_addr   = eth_validate_addr,
 	.ndo_fix_features    = xennet_fix_features,
 	.ndo_set_features    = xennet_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = xennet_poll_controller,
+#endif
 };
 
 static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
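The two hunks above add xennet_interrupt() ahead of xennet_netdev_ops (it is removed from its old location further down), add a CONFIG_NET_POLL_CONTROLLER-only xennet_poll_controller() wrapper, and hook the wrapper up as .ndo_poll_controller so netpoll users such as netconsole can service the device by calling the interrupt handler directly. As a rough, assumed sketch of how such a handler gets attached to the frontend's Xen event channel (the registration in setup_netfront() is not part of this diff, and the helper name and info->evtchn field here are assumptions):

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <xen/events.h>

/*
 * Assumed registration pattern: bind the frontend's event channel to
 * xennet_interrupt() with the net_device as dev_id, matching the
 * dev_id = netdev assumption the handler above relies on.
 */
static int example_bind_netfront_irq(struct netfront_info *info,
				     struct net_device *netdev)
{
	int err;

	err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
					0, netdev->name, netdev);
	if (err < 0)
		return err;

	netdev->irq = err;	/* bind returns the Linux irq number */
	return 0;
}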
@@ -1248,11 +1279,8 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev
 	struct netfront_info *np;
 
 	netdev = alloc_etherdev(sizeof(struct netfront_info));
-	if (!netdev) {
-		printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
-		       __func__);
+	if (!netdev)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	np = netdev_priv(netdev);
 	np->xbdev = dev;
@@ -1448,26 +1476,6 @@ static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
 	return 0;
 }
 
-static irqreturn_t xennet_interrupt(int irq, void *dev_id)
-{
-	struct net_device *dev = dev_id;
-	struct netfront_info *np = netdev_priv(dev);
-	unsigned long flags;
-
-	spin_lock_irqsave(&np->tx_lock, flags);
-
-	if (likely(netif_carrier_ok(dev))) {
-		xennet_tx_buf_gc(dev);
-		/* Under tx_lock: protects access to rx shared-ring indexes. */
-		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
-			napi_schedule(&np->napi);
-	}
-
-	spin_unlock_irqrestore(&np->tx_lock, flags);
-
-	return IRQ_HANDLED;
-}
-
 static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
 {
 	struct xen_netif_tx_sring *txs;