Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r--   drivers/net/xen-netfront.c | 246
1 file changed, 163 insertions(+), 83 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index b50fedcef8ac..d29365a232a1 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -66,8 +66,8 @@ struct netfront_cb {

 #define GRANT_INVALID_REF 0

-#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
-#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
+#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
+#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
 #define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)

 struct netfront_info {
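The macro switch above matters because NET_TX_RING_SIZE/NET_RX_RING_SIZE size in-struct arrays (see the next hunk): __RING_SIZE() takes a live shared-ring pointer, so its result is not an integer constant expression, while __CONST_RING_SIZE() is keyed on the sring struct tag and folds at compile time. For reference, the two macros in include/xen/interface/io/ring.h look roughly like this (paraphrased, not part of this patch; check the header for the authoritative definitions):

    /* __RD32() rounds down to a power of two (definition elided) */

    /* Needs a ring pointer _s; not usable as an array bound: */
    #define __RING_SIZE(_s, _sz) \
            (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))

    /* Compile-time variant keyed on the sring struct tag: */
    #define __CONST_RING_SIZE(_s, _sz) \
            (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
                    sizeof(((struct _s##_sring *)0)->ring[0])))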
@@ -120,6 +120,9 @@ struct netfront_info {
         unsigned long rx_pfn_array[NET_RX_RING_SIZE];
         struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
         struct mmu_update rx_mmu[NET_RX_RING_SIZE];
+
+        /* Statistics */
+        unsigned long rx_gso_checksum_fixup;
 };

 struct netfront_rx_info {
@@ -135,7 +138,7 @@ static void skb_entry_set_link(union skb_entry *list, unsigned short id)
 static int skb_entry_is_link(const union skb_entry *list)
 {
         BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
-        return ((unsigned long)list->skb < PAGE_OFFSET);
+        return (unsigned long)list->skb < PAGE_OFFSET;
 }

 /*
@@ -203,8 +206,8 @@ static void rx_refill_timeout(unsigned long data)

 static int netfront_tx_slot_available(struct netfront_info *np)
 {
-        return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
-                (TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
+        return (np->tx.req_prod_pvt - np->tx.rsp_cons) <
+                (TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
 }

 static void xennet_maybe_wake_tx(struct net_device *dev)
@@ -356,7 +359,7 @@ static void xennet_tx_buf_gc(struct net_device *dev)
                         struct xen_netif_tx_response *txrsp;

                         txrsp = RING_GET_RESPONSE(&np->tx, cons);
-                        if (txrsp->status == NETIF_RSP_NULL)
+                        if (txrsp->status == XEN_NETIF_RSP_NULL)
                                 continue;

                         id = txrsp->id;
@@ -413,7 +416,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
            larger than a page), split it it into page-sized chunks. */
         while (len > PAGE_SIZE - offset) {
                 tx->size = PAGE_SIZE - offset;
-                tx->flags |= NETTXF_more_data;
+                tx->flags |= XEN_NETTXF_more_data;
                 len -= tx->size;
                 data += tx->size;
                 offset = 0;
@@ -439,7 +442,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
         for (i = 0; i < frags; i++) {
                 skb_frag_t *frag = skb_shinfo(skb)->frags + i;

-                tx->flags |= NETTXF_more_data;
+                tx->flags |= XEN_NETTXF_more_data;

                 id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
                 np->tx_skbs[id].skb = skb_get(skb);
@@ -488,7 +491,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)

         if (unlikely(!netif_carrier_ok(dev) ||
                      (frags > 1 && !xennet_can_sg(dev)) ||
-                     netif_needs_gso(dev, skb))) {
+                     netif_needs_gso(skb, netif_skb_features(skb)))) {
                 spin_unlock_irq(&np->tx_lock);
                 goto drop;
         }
@@ -514,10 +517,10 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
         tx->flags = 0;
         if (skb->ip_summed == CHECKSUM_PARTIAL)
                 /* local packet? */
-                tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
+                tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
         else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                 /* remote but checksummed. */
-                tx->flags |= NETTXF_data_validated;
+                tx->flags |= XEN_NETTXF_data_validated;

         if (skb_shinfo(skb)->gso_size) {
                 struct xen_netif_extra_info *gso;
@@ -528,7 +531,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 if (extra)
                         extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
                 else
-                        tx->flags |= NETTXF_extra_info;
+                        tx->flags |= XEN_NETTXF_extra_info;

                 gso->u.gso.size = skb_shinfo(skb)->gso_size;
                 gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
@@ -648,7 +651,7 @@ static int xennet_get_responses(struct netfront_info *np,
         int err = 0;
         unsigned long ret;

-        if (rx->flags & NETRXF_extra_info) {
+        if (rx->flags & XEN_NETRXF_extra_info) {
                 err = xennet_get_extras(np, extras, rp);
                 cons = np->rx.rsp_cons;
         }
@@ -685,7 +688,7 @@ static int xennet_get_responses(struct netfront_info *np,
                 __skb_queue_tail(list, skb);

 next:
-                if (!(rx->flags & NETRXF_more_data))
+                if (!(rx->flags & XEN_NETRXF_more_data))
                         break;

                 if (cons + frags == rp) {
@@ -770,11 +773,29 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
         return cons;
 }

-static int skb_checksum_setup(struct sk_buff *skb)
+static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 {
         struct iphdr *iph;
         unsigned char *th;
         int err = -EPROTO;
+        int recalculate_partial_csum = 0;
+
+        /*
+         * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
+         * peers can fail to set NETRXF_csum_blank when sending a GSO
+         * frame. In this case force the SKB to CHECKSUM_PARTIAL and
+         * recalculate the partial checksum.
+         */
+        if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
+                struct netfront_info *np = netdev_priv(dev);
+                np->rx_gso_checksum_fixup++;
+                skb->ip_summed = CHECKSUM_PARTIAL;
+                recalculate_partial_csum = 1;
+        }
+
+        /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
+        if (skb->ip_summed != CHECKSUM_PARTIAL)
+                return 0;

         if (skb->protocol != htons(ETH_P_IP))
                 goto out;
@@ -788,9 +809,23 @@ static int skb_checksum_setup(struct sk_buff *skb)
         switch (iph->protocol) {
         case IPPROTO_TCP:
                 skb->csum_offset = offsetof(struct tcphdr, check);
+
+                if (recalculate_partial_csum) {
+                        struct tcphdr *tcph = (struct tcphdr *)th;
+                        tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+                                                         skb->len - iph->ihl*4,
+                                                         IPPROTO_TCP, 0);
+                }
                 break;
         case IPPROTO_UDP:
                 skb->csum_offset = offsetof(struct udphdr, check);
+
+                if (recalculate_partial_csum) {
+                        struct udphdr *udph = (struct udphdr *)th;
+                        udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+                                                         skb->len - iph->ihl*4,
+                                                         IPPROTO_UDP, 0);
+                }
                 break;
         default:
                 if (net_ratelimit())
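The ~csum_tcpudp_magic() calls above rebuild the IPv4 pseudo-header sum that a CHECKSUM_PARTIAL skb must carry in its TCP/UDP check field; the stack or the NIC later folds the payload into it and complements the result. A minimal self-contained sketch of that arithmetic (host byte order assumed for simplicity; the kernel works on network-order words):

    #include <stdint.h>

    /* One's-complement sum over the IPv4 pseudo header {saddr, daddr,
     * proto, l4len}.  The folded result matches what the patch stores
     * via ~csum_tcpudp_magic(saddr, daddr, len, proto, 0). */
    static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
                                   uint8_t proto, uint16_t l4len)
    {
            uint32_t sum = 0;

            sum += saddr >> 16;     /* source address, high and low words */
            sum += saddr & 0xffff;
            sum += daddr >> 16;     /* destination address */
            sum += daddr & 0xffff;
            sum += proto;           /* zero-padded protocol byte */
            sum += l4len;           /* L4 length = skb->len - iph->ihl*4 */

            while (sum >> 16)       /* fold carries back into 16 bits */
                    sum = (sum & 0xffff) + (sum >> 16);

            return (uint16_t)sum;
    }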
@@ -829,13 +864,11 @@ static int handle_incoming_queue(struct net_device *dev,
                 /* Ethernet work: Delayed to here as it peeks the header. */
                 skb->protocol = eth_type_trans(skb, dev);

-                if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                        if (skb_checksum_setup(skb)) {
-                                kfree_skb(skb);
-                                packets_dropped++;
-                                dev->stats.rx_errors++;
-                                continue;
-                        }
+                if (checksum_setup(dev, skb)) {
+                        kfree_skb(skb);
+                        packets_dropped++;
+                        dev->stats.rx_errors++;
+                        continue;
                 }

                 dev->stats.rx_packets++;
@@ -950,9 +983,9 @@ err:
                 skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
                 skb->len += skb->data_len;

-                if (rx->flags & NETRXF_csum_blank)
+                if (rx->flags & XEN_NETRXF_csum_blank)
                         skb->ip_summed = CHECKSUM_PARTIAL;
-                else if (rx->flags & NETRXF_data_validated)
+                else if (rx->flags & XEN_NETRXF_data_validated)
                         skb->ip_summed = CHECKSUM_UNNECESSARY;

                 __skb_queue_tail(&rxq, skb);
@@ -1107,6 +1140,42 @@ static void xennet_uninit(struct net_device *dev)
         gnttab_free_grant_references(np->gref_rx_head);
 }

+static u32 xennet_fix_features(struct net_device *dev, u32 features)
+{
+        struct netfront_info *np = netdev_priv(dev);
+        int val;
+
+        if (features & NETIF_F_SG) {
+                if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
+                                 "%d", &val) < 0)
+                        val = 0;
+
+                if (!val)
+                        features &= ~NETIF_F_SG;
+        }
+
+        if (features & NETIF_F_TSO) {
+                if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
+                                 "feature-gso-tcpv4", "%d", &val) < 0)
+                        val = 0;
+
+                if (!val)
+                        features &= ~NETIF_F_TSO;
+        }
+
+        return features;
+}
+
+static int xennet_set_features(struct net_device *dev, u32 features)
+{
+        if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
+                netdev_info(dev, "Reducing MTU because no SG offload");
+                dev->mtu = ETH_DATA_LEN;
+        }
+
+        return 0;
+}
+
 static const struct net_device_ops xennet_netdev_ops = {
         .ndo_open            = xennet_open,
         .ndo_uninit          = xennet_uninit,
@@ -1115,6 +1184,8 @@ static const struct net_device_ops xennet_netdev_ops = {
         .ndo_change_mtu      = xennet_change_mtu,
         .ndo_set_mac_address = eth_mac_addr,
         .ndo_validate_addr   = eth_validate_addr,
+        .ndo_fix_features    = xennet_fix_features,
+        .ndo_set_features    = xennet_set_features,
 };

 static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
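These two hooks tie the driver into the core feature-negotiation machinery that replaces the ethtool set_sg/set_tso callbacks removed further down: ndo_fix_features vetoes bits the backend does not advertise in xenstore, and ndo_set_features applies side effects (here, clamping the MTU once SG is gone). The core calls them from netdev_update_features() under RTNL; a simplified sketch of that flow (paraphrased from the 2.6.39-era net/core/dev.c, u32 feature masks as in this kernel generation, not a verbatim copy):

    static void update_features_sketch(struct net_device *dev)
    {
            /* start from what the user asked for, limited to hw_features */
            u32 features = (dev->features & ~dev->hw_features) |
                           (dev->wanted_features & dev->hw_features);

            /* let the driver veto combinations it cannot support */
            if (dev->netdev_ops->ndo_fix_features)
                    features = dev->netdev_ops->ndo_fix_features(dev, features);

            if (features == dev->features)
                    return;                 /* nothing changed */

            /* give the driver a chance to apply side effects */
            if (dev->netdev_ops->ndo_set_features)
                    dev->netdev_ops->ndo_set_features(dev, features);

            dev->features = features;
            netdev_features_change(dev);    /* notify listeners */
    }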
@@ -1176,7 +1247,17 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev
         netdev->netdev_ops      = &xennet_netdev_ops;

         netif_napi_add(netdev, &np->napi, xennet_poll, 64);
-        netdev->features        = NETIF_F_IP_CSUM;
+        netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+                                  NETIF_F_GSO_ROBUST;
+        netdev->hw_features     = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
+
+        /*
+         * Assume that all hw features are available for now. This set
+         * will be adjusted by the call to netdev_update_features() in
+         * xennet_connect() which is the earliest point where we can
+         * negotiate with the backend regarding supported features.
+         */
+        netdev->features |= netdev->hw_features;

         SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
         SET_NETDEV_DEV(netdev, &dev->dev);
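With SG and TSO now in hw_features, userspace toggles them through the generic ethtool offload path rather than the driver-private callbacks deleted below; for example (interface name illustrative):

    $ ethtool -K eth0 sg off tso off    # may clamp the MTU via xennet_set_features()
    $ ethtool -k eth0                   # list the currently active offloads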
@@ -1383,8 +1464,7 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
                 goto fail;

         err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
-                                        IRQF_SAMPLE_RANDOM, netdev->name,
-                                        netdev);
+                                        0, netdev->name, netdev);
         if (err < 0)
                 goto fail;
         netdev->irq = err;
@@ -1395,7 +1475,7 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
 }

 /* Common code used when first setting up, and when resuming. */
-static int talk_to_backend(struct xenbus_device *dev,
+static int talk_to_netback(struct xenbus_device *dev,
                            struct netfront_info *info)
 {
         const char *message;
@@ -1477,54 +1557,6 @@ again:
         return err;
 }

-static int xennet_set_sg(struct net_device *dev, u32 data)
-{
-        if (data) {
-                struct netfront_info *np = netdev_priv(dev);
-                int val;
-
-                if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
-                                 "%d", &val) < 0)
-                        val = 0;
-                if (!val)
-                        return -ENOSYS;
-        } else if (dev->mtu > ETH_DATA_LEN)
-                dev->mtu = ETH_DATA_LEN;
-
-        return ethtool_op_set_sg(dev, data);
-}
-
-static int xennet_set_tso(struct net_device *dev, u32 data)
-{
-        if (data) {
-                struct netfront_info *np = netdev_priv(dev);
-                int val;
-
-                if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
-                                 "feature-gso-tcpv4", "%d", &val) < 0)
-                        val = 0;
-                if (!val)
-                        return -ENOSYS;
-        }
-
-        return ethtool_op_set_tso(dev, data);
-}
-
-static void xennet_set_features(struct net_device *dev)
-{
-        /* Turn off all GSO bits except ROBUST. */
-        dev->features &= ~NETIF_F_GSO_MASK;
-        dev->features |= NETIF_F_GSO_ROBUST;
-        xennet_set_sg(dev, 0);
-
-        /* We need checksum offload to enable scatter/gather and TSO. */
-        if (!(dev->features & NETIF_F_IP_CSUM))
-                return;
-
-        if (!xennet_set_sg(dev, 1))
-                xennet_set_tso(dev, 1);
-}
-
 static int xennet_connect(struct net_device *dev)
 {
         struct netfront_info *np = netdev_priv(dev);
@@ -1545,11 +1577,13 @@ static int xennet_connect(struct net_device *dev)
                 return -ENODEV;
         }

-        err = talk_to_backend(np->xbdev, np);
+        err = talk_to_netback(np->xbdev, np);
         if (err)
                 return err;

-        xennet_set_features(dev);
+        rtnl_lock();
+        netdev_update_features(dev);
+        rtnl_unlock();

         spin_lock_bh(&np->rx_lock);
         spin_lock_irq(&np->tx_lock);
@@ -1599,7 +1633,7 @@ static int xennet_connect(struct net_device *dev)
 /**
  * Callback received when the backend's state changes.
  */
-static void backend_changed(struct xenbus_device *dev,
+static void netback_changed(struct xenbus_device *dev,
                             enum xenbus_state backend_state)
 {
         struct netfront_info *np = dev_get_drvdata(&dev->dev);
@@ -1610,6 +1644,8 @@ static void backend_changed(struct xenbus_device *dev,
         switch (backend_state) {
         case XenbusStateInitialising:
         case XenbusStateInitialised:
+        case XenbusStateReconfiguring:
+        case XenbusStateReconfigured:
         case XenbusStateConnected:
         case XenbusStateUnknown:
         case XenbusStateClosed:
@@ -1630,12 +1666,56 @@ static void backend_changed(struct xenbus_device *dev,
         }
 }

+static const struct xennet_stat {
+        char name[ETH_GSTRING_LEN];
+        u16 offset;
+} xennet_stats[] = {
+        {
+                "rx_gso_checksum_fixup",
+                offsetof(struct netfront_info, rx_gso_checksum_fixup)
+        },
+};
+
+static int xennet_get_sset_count(struct net_device *dev, int string_set)
+{
+        switch (string_set) {
+        case ETH_SS_STATS:
+                return ARRAY_SIZE(xennet_stats);
+        default:
+                return -EINVAL;
+        }
+}
+
+static void xennet_get_ethtool_stats(struct net_device *dev,
+                                     struct ethtool_stats *stats, u64 * data)
+{
+        void *np = netdev_priv(dev);
+        int i;
+
+        for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+                data[i] = *(unsigned long *)(np + xennet_stats[i].offset);
+}
+
+static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
+        int i;
+
+        switch (stringset) {
+        case ETH_SS_STATS:
+                for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+                        memcpy(data + i * ETH_GSTRING_LEN,
+                               xennet_stats[i].name, ETH_GSTRING_LEN);
+                break;
+        }
+}
+
 static const struct ethtool_ops xennet_ethtool_ops =
 {
-        .set_tx_csum = ethtool_op_set_tx_csum,
-        .set_sg = xennet_set_sg,
-        .set_tso = xennet_set_tso,
         .get_link = ethtool_op_get_link,
+
+        .get_sset_count = xennet_get_sset_count,
+        .get_ethtool_stats = xennet_get_ethtool_stats,
+        .get_strings = xennet_get_strings,
 };

 #ifdef CONFIG_SYSFS
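The ETH_SS_STATS plumbing above exposes the GSO-checksum-fixup counter added to struct netfront_info; reading it from userspace looks something like this (interface name and value illustrative):

    $ ethtool -S eth0
    NIC statistics:
         rx_gso_checksum_fixup: 3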
@@ -1801,7 +1881,7 @@ static struct xenbus_driver netfront_driver = {
         .probe = netfront_probe,
         .remove = __devexit_p(xennet_remove),
         .resume = netfront_resume,
-        .otherend_changed = backend_changed,
+        .otherend_changed = netback_changed,
 };

 static int __init netif_init(void)