path: root/drivers/net/xen-netfront.c
Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r--  drivers/net/xen-netfront.c  284
1 file changed, 201 insertions(+), 83 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 1db101415069..36808bf25677 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -29,6 +29,8 @@
  * IN THE SOFTWARE.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
@@ -85,7 +87,15 @@ struct netfront_info {
 
         struct napi_struct napi;
 
-        unsigned int evtchn;
+        /* Split event channels support, tx_* == rx_* when using
+         * single event channel.
+         */
+        unsigned int tx_evtchn, rx_evtchn;
+        unsigned int tx_irq, rx_irq;
+        /* Only used when split event channels support is enabled */
+        char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
+        char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
+
         struct xenbus_device *xbdev;
 
         spinlock_t tx_lock;
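
Note: the IFNAMSIZ+4 sizing above leaves room for the longest interface
name (IFNAMSIZ-1 characters), the three-character "-tx"/"-rx" suffix and
a terminating NUL. A minimal userspace sketch of the arithmetic, assuming
the usual Linux IFNAMSIZ of 16 (the file name and demo are hypothetical,
not part of the patch):

/* demo_irq_name.c -- illustrative only */
#include <stdio.h>

#define IFNAMSIZ 16

int main(void)
{
        const char *name = "verylongifname0";   /* 15 chars, the maximum */
        char tx_irq_name[IFNAMSIZ + 4];         /* "DEVNAME-tx" + NUL */

        /* 15 name chars + 3 suffix chars + NUL = 19 bytes <= 20 */
        int n = snprintf(tx_irq_name, sizeof(tx_irq_name), "%s-tx", name);

        printf("%s (%d chars in a %zu-byte buffer)\n",
               tx_irq_name, n, sizeof(tx_irq_name));
        return 0;
}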
@@ -276,8 +286,7 @@ no_skb:
                         break;
                 }
 
-                __skb_fill_page_desc(skb, 0, page, 0, 0);
-                skb_shinfo(skb)->nr_frags = 1;
+                skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
                 __skb_queue_tail(&np->rx_batch, skb);
         }
 
@@ -330,7 +339,7 @@ no_skb:
  push:
         RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
         if (notify)
-                notify_remote_via_irq(np->netdev->irq);
+                notify_remote_via_irq(np->rx_irq);
 }
 
 static int xennet_open(struct net_device *dev)
@@ -377,9 +386,8 @@ static void xennet_tx_buf_gc(struct net_device *dev)
                         skb = np->tx_skbs[id].skb;
                         if (unlikely(gnttab_query_foreign_access(
                                 np->grant_tx_ref[id]) != 0)) {
-                                printk(KERN_ALERT "xennet_tx_buf_gc: warning "
-                                       "-- grant still in use by backend "
-                                       "domain.\n");
+                                pr_alert("%s: warning -- grant still in use by backend domain\n",
+                                         __func__);
                                 BUG();
                         }
                         gnttab_end_foreign_access_ref(
@@ -623,7 +631,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
         RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
         if (notify)
-                notify_remote_via_irq(np->netdev->irq);
+                notify_remote_via_irq(np->tx_irq);
 
         u64_stats_update_begin(&stats->syncp);
         stats->tx_bytes += skb->len;
@@ -796,14 +804,14 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
 {
         if (!gso->u.gso.size) {
                 if (net_ratelimit())
-                        printk(KERN_WARNING "GSO size must not be zero.\n");
+                        pr_warn("GSO size must not be zero\n");
                 return -EINVAL;
         }
 
         /* Currently only TCPv4 S.O. is supported. */
         if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
                 if (net_ratelimit())
-                        printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type);
+                        pr_warn("Bad GSO type %d\n", gso->u.gso.type);
                 return -EINVAL;
         }
 
@@ -822,7 +830,6 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
                                   struct sk_buff_head *list)
 {
         struct skb_shared_info *shinfo = skb_shinfo(skb);
-        int nr_frags = shinfo->nr_frags;
         RING_IDX cons = np->rx.rsp_cons;
         struct sk_buff *nskb;
 
@@ -831,26 +838,27 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
                         RING_GET_RESPONSE(&np->rx, ++cons);
                 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 
-                __skb_fill_page_desc(skb, nr_frags,
-                                     skb_frag_page(nfrag),
-                                     rx->offset, rx->status);
+                if (shinfo->nr_frags == MAX_SKB_FRAGS) {
+                        unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
+
+                        BUG_ON(pull_to <= skb_headlen(skb));
+                        __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
+                }
+                BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
 
-                skb->data_len += rx->status;
+                skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
+                                rx->offset, rx->status, PAGE_SIZE);
 
                 skb_shinfo(nskb)->nr_frags = 0;
                 kfree_skb(nskb);
-
-                nr_frags++;
         }
 
-        shinfo->nr_frags = nr_frags;
         return cons;
 }
 
 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 {
         struct iphdr *iph;
-        unsigned char *th;
         int err = -EPROTO;
         int recalculate_partial_csum = 0;
 
@@ -875,27 +883,27 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
                 goto out;
 
         iph = (void *)skb->data;
-        th = skb->data + 4 * iph->ihl;
-        if (th >= skb_tail_pointer(skb))
-                goto out;
 
-        skb->csum_start = th - skb->head;
         switch (iph->protocol) {
         case IPPROTO_TCP:
-                skb->csum_offset = offsetof(struct tcphdr, check);
+                if (!skb_partial_csum_set(skb, 4 * iph->ihl,
+                                          offsetof(struct tcphdr, check)))
+                        goto out;
 
                 if (recalculate_partial_csum) {
-                        struct tcphdr *tcph = (struct tcphdr *)th;
+                        struct tcphdr *tcph = tcp_hdr(skb);
                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                          skb->len - iph->ihl*4,
                                                          IPPROTO_TCP, 0);
                 }
                 break;
         case IPPROTO_UDP:
-                skb->csum_offset = offsetof(struct udphdr, check);
+                if (!skb_partial_csum_set(skb, 4 * iph->ihl,
+                                          offsetof(struct udphdr, check)))
+                        goto out;
 
                 if (recalculate_partial_csum) {
-                        struct udphdr *udph = (struct udphdr *)th;
+                        struct udphdr *udph = udp_hdr(skb);
                         udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                          skb->len - iph->ihl*4,
                                                          IPPROTO_UDP, 0);
@@ -903,15 +911,11 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
                 break;
         default:
                 if (net_ratelimit())
-                        printk(KERN_ERR "Attempting to checksum a non-"
-                               "TCP/UDP packet, dropping a protocol"
-                               " %d packet", iph->protocol);
+                        pr_err("Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
+                               iph->protocol);
                 goto out;
         }
 
-        if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
-                goto out;
-
         err = 0;
 
 out:
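
Note: skb_partial_csum_set() subsumes the removed open-coded bounds checks
(it validates that the header and checksum field lie inside the linear
area before setting csum_start/csum_offset). The seed written by
~csum_tcpudp_magic(..., 0) is the folded 16-bit one's-complement sum of
the pseudo header. A minimal userspace model of that fold (byte-order
details glossed over; the demo is illustrative, not part of the patch):

/* demo_pseudo_csum.c -- illustrative only */
#include <stdint.h>
#include <stdio.h>

static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
                               uint32_t len, uint8_t proto)
{
        uint64_t sum = (saddr >> 16) + (saddr & 0xffff)
                     + (daddr >> 16) + (daddr & 0xffff)
                     + proto + len;

        while (sum >> 16)       /* fold the carries back in */
                sum = (sum >> 16) + (sum & 0xffff);
        return (uint16_t)sum;   /* equals ~csum_tcpudp_magic(..., 0) */
}

int main(void)
{
        /* 10.0.0.1 -> 10.0.0.2, 40-byte TCP segment (protocol 6) */
        printf("seed = 0x%04x\n",
               pseudo_hdr_sum(0x0a000001, 0x0a000002, 40, 6));
        return 0;
}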
@@ -929,7 +933,8 @@ static int handle_incoming_queue(struct net_device *dev,
         while ((skb = __skb_dequeue(rxq)) != NULL) {
                 int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
-                __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
+                if (pull_to > skb_headlen(skb))
+                        __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 
                 /* Ethernet work: Delayed to here as it peeks the header. */
                 skb->protocol = eth_type_trans(skb, dev);
@@ -1015,16 +1020,10 @@ err:
         skb_shinfo(skb)->frags[0].page_offset = rx->offset;
         skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
         skb->data_len = rx->status;
+        skb->len += rx->status;
 
         i = xennet_fill_frags(np, skb, &tmpq);
 
-        /*
-         * Truesize is the actual allocation size, even if the
-         * allocation is only partially used.
-         */
-        skb->truesize += PAGE_SIZE * skb_shinfo(skb)->nr_frags;
-        skb->len += skb->data_len;
-
         if (rx->flags & XEN_NETRXF_csum_blank)
                 skb->ip_summed = CHECKSUM_PARTIAL;
         else if (rx->flags & XEN_NETRXF_data_validated)
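
Note: skb_add_rx_frag() (used in xennet_fill_frags() above) accounts
len, data_len and truesize as each fragment is attached, which is what
makes the manual truesize/len fixup removed here redundant. A toy
userspace model of that bookkeeping (hypothetical struct, not the real
sk_buff):

/* demo_rx_frag.c -- illustrative only */
#include <stdio.h>

#define PAGE_SIZE 4096

struct toy_skb {
        unsigned int len;       /* total bytes in the packet */
        unsigned int data_len;  /* bytes held in page fragments */
        unsigned int truesize;  /* memory charged, even if partly used */
        unsigned int nr_frags;
};

static void toy_add_rx_frag(struct toy_skb *skb, unsigned int size,
                            unsigned int truesize)
{
        skb->nr_frags++;
        skb->len += size;
        skb->data_len += size;
        skb->truesize += truesize;      /* whole page per fragment */
}

int main(void)
{
        struct toy_skb skb = { .len = 64, .truesize = 256 };

        toy_add_rx_frag(&skb, 1000, PAGE_SIZE);
        printf("len=%u data_len=%u truesize=%u nr_frags=%u\n",
               skb.len, skb.data_len, skb.truesize, skb.nr_frags);
        return 0;
}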
@@ -1254,23 +1253,35 @@ static int xennet_set_features(struct net_device *dev,
         return 0;
 }
 
-static irqreturn_t xennet_interrupt(int irq, void *dev_id)
+static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
 {
-        struct net_device *dev = dev_id;
-        struct netfront_info *np = netdev_priv(dev);
+        struct netfront_info *np = dev_id;
+        struct net_device *dev = np->netdev;
         unsigned long flags;
 
         spin_lock_irqsave(&np->tx_lock, flags);
+        xennet_tx_buf_gc(dev);
+        spin_unlock_irqrestore(&np->tx_lock, flags);
 
-        if (likely(netif_carrier_ok(dev))) {
-                xennet_tx_buf_gc(dev);
-                /* Under tx_lock: protects access to rx shared-ring indexes. */
-                if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
-                        napi_schedule(&np->napi);
-        }
+        return IRQ_HANDLED;
+}
+
+static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
+{
+        struct netfront_info *np = dev_id;
+        struct net_device *dev = np->netdev;
+
+        if (likely(netif_carrier_ok(dev) &&
+                   RING_HAS_UNCONSUMED_RESPONSES(&np->rx)))
+                napi_schedule(&np->napi);
 
-        spin_unlock_irqrestore(&np->tx_lock, flags);
+        return IRQ_HANDLED;
+}
 
+static irqreturn_t xennet_interrupt(int irq, void *dev_id)
+{
+        xennet_tx_interrupt(irq, dev_id);
+        xennet_rx_interrupt(irq, dev_id);
         return IRQ_HANDLED;
 }
 
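
Note: with a single event channel both directions share one interrupt,
so the combined handler simply runs the tx path and then the rx path.
A minimal userspace model of that dispatch pattern (stub types and
functions, not kernel code):

/* demo_dispatch.c -- illustrative only */
#include <stdio.h>

typedef int irqreturn_t;
#define IRQ_HANDLED 1

static irqreturn_t tx_handler(int irq, void *dev_id)
{
        printf("irq %d: reap completed tx slots\n", irq);
        return IRQ_HANDLED;
}

static irqreturn_t rx_handler(int irq, void *dev_id)
{
        printf("irq %d: schedule NAPI poll for rx\n", irq);
        return IRQ_HANDLED;
}

static irqreturn_t combined_handler(int irq, void *dev_id)
{
        tx_handler(irq, dev_id);        /* same order as the patch */
        rx_handler(irq, dev_id);
        return IRQ_HANDLED;
}

int main(void)
{
        combined_handler(7, NULL);      /* the single-channel case */
        return 0;
}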
@@ -1343,14 +1354,14 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
         /* A grant for every tx ring slot */
         if (gnttab_alloc_grant_references(TX_MAX_TARGET,
                                           &np->gref_tx_head) < 0) {
-                printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
+                pr_alert("can't alloc tx grant refs\n");
                 err = -ENOMEM;
                 goto exit_free_stats;
         }
         /* A grant for every rx ring slot */
         if (gnttab_alloc_grant_references(RX_MAX_TARGET,
                                           &np->gref_rx_head) < 0) {
-                printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
+                pr_alert("can't alloc rx grant refs\n");
                 err = -ENOMEM;
                 goto exit_free_tx;
         }
@@ -1414,16 +1425,14 @@ static int netfront_probe(struct xenbus_device *dev,
 
         err = register_netdev(info->netdev);
         if (err) {
-                printk(KERN_WARNING "%s: register_netdev err=%d\n",
-                       __func__, err);
+                pr_warn("%s: register_netdev err=%d\n", __func__, err);
                 goto fail;
         }
 
         err = xennet_sysfs_addif(info->netdev);
         if (err) {
                 unregister_netdev(info->netdev);
-                printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
-                       __func__, err);
+                pr_warn("%s: add sysfs failed err=%d\n", __func__, err);
                 goto fail;
         }
 
@@ -1451,9 +1460,14 @@ static void xennet_disconnect_backend(struct netfront_info *info)
         spin_unlock_irq(&info->tx_lock);
         spin_unlock_bh(&info->rx_lock);
 
-        if (info->netdev->irq)
-                unbind_from_irqhandler(info->netdev->irq, info->netdev);
-        info->evtchn = info->netdev->irq = 0;
+        if (info->tx_irq && (info->tx_irq == info->rx_irq))
+                unbind_from_irqhandler(info->tx_irq, info);
+        if (info->tx_irq && (info->tx_irq != info->rx_irq)) {
+                unbind_from_irqhandler(info->tx_irq, info);
+                unbind_from_irqhandler(info->rx_irq, info);
+        }
+        info->tx_evtchn = info->rx_evtchn = 0;
+        info->tx_irq = info->rx_irq = 0;
 
         /* End access and free the pages */
         xennet_end_access(info->tx_ring_ref, info->tx.sring);
@@ -1503,12 +1517,82 @@ static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
         return 0;
 }
 
+static int setup_netfront_single(struct netfront_info *info)
+{
+        int err;
+
+        err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
+        if (err < 0)
+                goto fail;
+
+        err = bind_evtchn_to_irqhandler(info->tx_evtchn,
+                                        xennet_interrupt,
+                                        0, info->netdev->name, info);
+        if (err < 0)
+                goto bind_fail;
+        info->rx_evtchn = info->tx_evtchn;
+        info->rx_irq = info->tx_irq = err;
+
+        return 0;
+
+bind_fail:
+        xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
+        info->tx_evtchn = 0;
+fail:
+        return err;
+}
+
+static int setup_netfront_split(struct netfront_info *info)
+{
+        int err;
+
+        err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
+        if (err < 0)
+                goto fail;
+        err = xenbus_alloc_evtchn(info->xbdev, &info->rx_evtchn);
+        if (err < 0)
+                goto alloc_rx_evtchn_fail;
+
+        snprintf(info->tx_irq_name, sizeof(info->tx_irq_name),
+                 "%s-tx", info->netdev->name);
+        err = bind_evtchn_to_irqhandler(info->tx_evtchn,
+                                        xennet_tx_interrupt,
+                                        0, info->tx_irq_name, info);
+        if (err < 0)
+                goto bind_tx_fail;
+        info->tx_irq = err;
+
+        snprintf(info->rx_irq_name, sizeof(info->rx_irq_name),
+                 "%s-rx", info->netdev->name);
+        err = bind_evtchn_to_irqhandler(info->rx_evtchn,
+                                        xennet_rx_interrupt,
+                                        0, info->rx_irq_name, info);
+        if (err < 0)
+                goto bind_rx_fail;
+        info->rx_irq = err;
+
+        return 0;
+
+bind_rx_fail:
+        unbind_from_irqhandler(info->tx_irq, info);
+        info->tx_irq = 0;
+bind_tx_fail:
+        xenbus_free_evtchn(info->xbdev, info->rx_evtchn);
+        info->rx_evtchn = 0;
+alloc_rx_evtchn_fail:
+        xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
+        info->tx_evtchn = 0;
+fail:
+        return err;
+}
+
 static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
 {
         struct xen_netif_tx_sring *txs;
         struct xen_netif_rx_sring *rxs;
         int err;
         struct net_device *netdev = info->netdev;
+        unsigned int feature_split_evtchn;
 
         info->tx_ring_ref = GRANT_INVALID_REF;
         info->rx_ring_ref = GRANT_INVALID_REF;
@@ -1516,6 +1600,12 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
         info->tx.sring = NULL;
         netdev->irq = 0;
 
+        err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+                           "feature-split-event-channels", "%u",
+                           &feature_split_evtchn);
+        if (err < 0)
+                feature_split_evtchn = 0;
+
         err = xen_net_read_mac(dev, netdev->dev_addr);
         if (err) {
                 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
@@ -1532,40 +1622,50 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
         FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
 
         err = xenbus_grant_ring(dev, virt_to_mfn(txs));
-        if (err < 0) {
-                free_page((unsigned long)txs);
-                goto fail;
-        }
+        if (err < 0)
+                goto grant_tx_ring_fail;
 
         info->tx_ring_ref = err;
         rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
         if (!rxs) {
                 err = -ENOMEM;
                 xenbus_dev_fatal(dev, err, "allocating rx ring page");
-                goto fail;
+                goto alloc_rx_ring_fail;
         }
         SHARED_RING_INIT(rxs);
         FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
 
         err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
-        if (err < 0) {
-                free_page((unsigned long)rxs);
-                goto fail;
-        }
+        if (err < 0)
+                goto grant_rx_ring_fail;
         info->rx_ring_ref = err;
 
-        err = xenbus_alloc_evtchn(dev, &info->evtchn);
+        if (feature_split_evtchn)
+                err = setup_netfront_split(info);
+        /* setup single event channel if
+         *  a) feature-split-event-channels == 0
+         *  b) feature-split-event-channels == 1 but failed to setup
+         */
+        if (!feature_split_evtchn || (feature_split_evtchn && err))
+                err = setup_netfront_single(info);
+
         if (err)
-                goto fail;
+                goto alloc_evtchn_fail;
 
-        err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
-                                        0, netdev->name, netdev);
-        if (err < 0)
-                goto fail;
-        netdev->irq = err;
         return 0;
 
- fail:
+        /* If we fail to setup netfront, it is safe to just revoke access to
+         * granted pages because backend is not accessing it at this point.
+         */
+alloc_evtchn_fail:
+        gnttab_end_foreign_access_ref(info->rx_ring_ref, 0);
+grant_rx_ring_fail:
+        free_page((unsigned long)rxs);
+alloc_rx_ring_fail:
+        gnttab_end_foreign_access_ref(info->tx_ring_ref, 0);
+grant_tx_ring_fail:
+        free_page((unsigned long)txs);
+fail:
         return err;
 }
 
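
Note: the negotiation above tries split channels only when the backend
advertises feature-split-event-channels, and falls back to a single
channel if the split setup fails; the condition
"!feature_split_evtchn || (feature_split_evtchn && err)" simplifies to
"!feature_split_evtchn || err". A standalone sketch of the control flow
with stubbed-out setup calls (illustrative only, not the Xen API):

/* demo_fallback.c */
#include <stdio.h>

static int setup_split(void)  { return -1; }    /* pretend it failed */
static int setup_single(void) { return 0; }

int main(void)
{
        unsigned int feature_split_evtchn = 1;  /* as read from xenstore */
        int err = 0;

        if (feature_split_evtchn)
                err = setup_split();
        /* fall back when the feature is absent or split setup failed */
        if (!feature_split_evtchn || err)
                err = setup_single();

        printf("final err = %d\n", err);
        return 0;
}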
@@ -1601,11 +1701,27 @@ again:
                 message = "writing rx ring-ref";
                 goto abort_transaction;
         }
-        err = xenbus_printf(xbt, dev->nodename,
-                            "event-channel", "%u", info->evtchn);
-        if (err) {
-                message = "writing event-channel";
-                goto abort_transaction;
+
+        if (info->tx_evtchn == info->rx_evtchn) {
+                err = xenbus_printf(xbt, dev->nodename,
+                                    "event-channel", "%u", info->tx_evtchn);
+                if (err) {
+                        message = "writing event-channel";
+                        goto abort_transaction;
+                }
+        } else {
+                err = xenbus_printf(xbt, dev->nodename,
+                                    "event-channel-tx", "%u", info->tx_evtchn);
+                if (err) {
+                        message = "writing event-channel-tx";
+                        goto abort_transaction;
+                }
+                err = xenbus_printf(xbt, dev->nodename,
+                                    "event-channel-rx", "%u", info->rx_evtchn);
+                if (err) {
+                        message = "writing event-channel-rx";
+                        goto abort_transaction;
+                }
         }
 
         err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
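
Note: the frontend therefore publishes either one xenstore key or two,
and an old backend that only understands "event-channel" never sees the
split keys, which keeps the protocol backward compatible. A toy model of
the key selection (the node path is hypothetical):

/* demo_keys.c -- illustrative only */
#include <stdio.h>

static void write_evtchn_keys(unsigned int tx, unsigned int rx)
{
        if (tx == rx) {
                printf("device/vif/0/event-channel = %u\n", tx);
        } else {
                printf("device/vif/0/event-channel-tx = %u\n", tx);
                printf("device/vif/0/event-channel-rx = %u\n", rx);
        }
}

int main(void)
{
        write_evtchn_keys(5, 5);        /* single event channel */
        write_evtchn_keys(5, 6);        /* split event channels */
        return 0;
}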
@@ -1718,7 +1834,9 @@ static int xennet_connect(struct net_device *dev)
          * packets.
          */
         netif_carrier_on(np->netdev);
-        notify_remote_via_irq(np->netdev->irq);
+        notify_remote_via_irq(np->tx_irq);
+        if (np->tx_irq != np->rx_irq)
+                notify_remote_via_irq(np->rx_irq);
         xennet_tx_buf_gc(dev);
         xennet_alloc_rx_buffers(dev);
 
@@ -1991,7 +2109,7 @@ static int __init netif_init(void)
         if (xen_hvm_domain() && !xen_platform_pci_unplug)
                 return -ENODEV;
 
-        printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");
+        pr_info("Initialising Xen virtual ethernet driver\n");
 
         return xenbus_register_frontend(&netfront_driver);
 }