Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r--	drivers/net/xen-netfront.c	208
1 file changed, 90 insertions(+), 118 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 36808bf25677..f9daa9e183f2 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -117,6 +117,7 @@ struct netfront_info {
 	} tx_skbs[NET_TX_RING_SIZE];
 	grant_ref_t gref_tx_head;
 	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
+	struct page *grant_tx_page[NET_TX_RING_SIZE];
 	unsigned tx_skb_freelist;
 
 	spinlock_t rx_lock ____cacheline_aligned_in_smp;
@@ -277,12 +278,13 @@ static void xennet_alloc_rx_buffers(struct net_device *dev)
 		if (!page) {
 			kfree_skb(skb);
 no_skb:
-			/* Any skbuffs queued for refill? Force them out. */
-			if (i != 0)
-				goto refill;
 			/* Could not allocate any skbuffs. Try again later. */
 			mod_timer(&np->rx_refill_timer,
 				  jiffies + (HZ/10));
+
+			/* Any skbuffs queued for refill? Force them out. */
+			if (i != 0)
+				goto refill;
 			break;
 		}
 
@@ -395,6 +397,7 @@ static void xennet_tx_buf_gc(struct net_device *dev)
 			gnttab_release_grant_reference(
 				&np->gref_tx_head, np->grant_tx_ref[id]);
 			np->grant_tx_ref[id] = GRANT_INVALID_REF;
+			np->grant_tx_page[id] = NULL;
 			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
 			dev_kfree_skb_irq(skb);
 		}
@@ -451,6 +454,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
 						mfn, GNTMAP_readonly);
 
+		np->grant_tx_page[id] = virt_to_page(data);
 		tx->gref = np->grant_tx_ref[id] = ref;
 		tx->offset = offset;
 		tx->size = len;
@@ -496,6 +500,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 							np->xbdev->otherend_id,
 							mfn, GNTMAP_readonly);
 
+			np->grant_tx_page[id] = page;
 			tx->gref = np->grant_tx_ref[id] = ref;
 			tx->offset = offset;
 			tx->size = bytes;
@@ -595,6 +600,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	mfn = virt_to_mfn(data);
 	gnttab_grant_foreign_access_ref(
 		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
+	np->grant_tx_page[id] = virt_to_page(data);
 	tx->gref = np->grant_tx_ref[id] = ref;
 	tx->offset = offset;
 	tx->size = len;
@@ -616,7 +622,9 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		tx->flags |= XEN_NETTXF_extra_info;
 
 		gso->u.gso.size = skb_shinfo(skb)->gso_size;
-		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
+		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
+			XEN_NETIF_GSO_TYPE_TCPV6 :
+			XEN_NETIF_GSO_TYPE_TCPV4;
 		gso->u.gso.pad = 0;
 		gso->u.gso.features = 0;
 
@@ -808,15 +816,18 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
 		return -EINVAL;
 	}
 
-	/* Currently only TCPv4 S.O. is supported. */
-	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
+	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
 		if (net_ratelimit())
 			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
 		return -EINVAL;
 	}
 
 	skb_shinfo(skb)->gso_size = gso->u.gso.size;
-	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+	skb_shinfo(skb)->gso_type =
+		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
+		SKB_GSO_TCPV4 :
+		SKB_GSO_TCPV6;
 
 	/* Header must be checked, and gso_segs computed. */
 	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
@@ -858,9 +869,7 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
 
 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 {
-	struct iphdr *iph;
-	int err = -EPROTO;
-	int recalculate_partial_csum = 0;
+	bool recalculate_partial_csum = false;
 
 	/*
 	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
@@ -872,54 +881,14 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 		struct netfront_info *np = netdev_priv(dev);
 		np->rx_gso_checksum_fixup++;
 		skb->ip_summed = CHECKSUM_PARTIAL;
-		recalculate_partial_csum = 1;
+		recalculate_partial_csum = true;
 	}
 
 	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
 		return 0;
 
-	if (skb->protocol != htons(ETH_P_IP))
-		goto out;
-
-	iph = (void *)skb->data;
-
-	switch (iph->protocol) {
-	case IPPROTO_TCP:
-		if (!skb_partial_csum_set(skb, 4 * iph->ihl,
-					  offsetof(struct tcphdr, check)))
-			goto out;
-
-		if (recalculate_partial_csum) {
-			struct tcphdr *tcph = tcp_hdr(skb);
-			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-							 skb->len - iph->ihl*4,
-							 IPPROTO_TCP, 0);
-		}
-		break;
-	case IPPROTO_UDP:
-		if (!skb_partial_csum_set(skb, 4 * iph->ihl,
-					  offsetof(struct udphdr, check)))
-			goto out;
-
-		if (recalculate_partial_csum) {
-			struct udphdr *udph = udp_hdr(skb);
-			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-							 skb->len - iph->ihl*4,
-							 IPPROTO_UDP, 0);
-		}
-		break;
-	default:
-		if (net_ratelimit())
-			pr_err("Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
-			       iph->protocol);
-		goto out;
-	}
-
-	err = 0;
-
-out:
-	return err;
+	return skb_checksum_setup(skb, recalculate_partial_csum);
 }
 
 static int handle_incoming_queue(struct net_device *dev,
@@ -952,7 +921,7 @@ static int handle_incoming_queue(struct net_device *dev,
 		u64_stats_update_end(&stats->syncp);
 
 		/* Pass it up. */
-		netif_receive_skb(skb);
+		napi_gro_receive(&np->napi, skb);
 	}
 
 	return packets_dropped;
@@ -1051,6 +1020,8 @@ err:
 	if (work_done < budget) {
 		int more_to_do = 0;
 
+		napi_gro_flush(napi, false);
+
 		local_irq_save(flags);
 
 		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
@@ -1119,10 +1090,11 @@ static void xennet_release_tx_bufs(struct netfront_info *np)
 			continue;
 
 		skb = np->tx_skbs[i].skb;
-		gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
-					      GNTMAP_readonly);
-		gnttab_release_grant_reference(&np->gref_tx_head,
-					       np->grant_tx_ref[i]);
+		get_page(np->grant_tx_page[i]);
+		gnttab_end_foreign_access(np->grant_tx_ref[i],
+					  GNTMAP_readonly,
+					  (unsigned long)page_address(np->grant_tx_page[i]));
+		np->grant_tx_page[i] = NULL;
 		np->grant_tx_ref[i] = GRANT_INVALID_REF;
 		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
 		dev_kfree_skb_irq(skb);
@@ -1131,78 +1103,35 @@ static void xennet_release_tx_bufs(struct netfront_info *np)
 
 static void xennet_release_rx_bufs(struct netfront_info *np)
 {
-	struct mmu_update *mmu = np->rx_mmu;
-	struct multicall_entry *mcl = np->rx_mcl;
-	struct sk_buff_head free_list;
-	struct sk_buff *skb;
-	unsigned long mfn;
-	int xfer = 0, noxfer = 0, unused = 0;
 	int id, ref;
 
-	dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
-		 __func__);
-	return;
-
-	skb_queue_head_init(&free_list);
-
 	spin_lock_bh(&np->rx_lock);
 
 	for (id = 0; id < NET_RX_RING_SIZE; id++) {
-		ref = np->grant_rx_ref[id];
-		if (ref == GRANT_INVALID_REF) {
-			unused++;
-			continue;
-		}
+		struct sk_buff *skb;
+		struct page *page;
 
 		skb = np->rx_skbs[id];
-		mfn = gnttab_end_foreign_transfer_ref(ref);
-		gnttab_release_grant_reference(&np->gref_rx_head, ref);
-		np->grant_rx_ref[id] = GRANT_INVALID_REF;
-
-		if (0 == mfn) {
-			skb_shinfo(skb)->nr_frags = 0;
-			dev_kfree_skb(skb);
-			noxfer++;
+		if (!skb)
 			continue;
-		}
 
-		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-			/* Remap the page. */
-			const struct page *page =
-				skb_frag_page(&skb_shinfo(skb)->frags[0]);
-			unsigned long pfn = page_to_pfn(page);
-			void *vaddr = page_address(page);
+		ref = np->grant_rx_ref[id];
+		if (ref == GRANT_INVALID_REF)
+			continue;
 
-			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
-						mfn_pte(mfn, PAGE_KERNEL),
-						0);
-			mcl++;
-			mmu->ptr = ((u64)mfn << PAGE_SHIFT)
-				| MMU_MACHPHYS_UPDATE;
-			mmu->val = pfn;
-			mmu++;
+		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
 
-			set_phys_to_machine(pfn, mfn);
-		}
-		__skb_queue_tail(&free_list, skb);
-		xfer++;
-	}
-
-	dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
-		 __func__, xfer, noxfer, unused);
+		/* gnttab_end_foreign_access() needs a page ref until
+		 * foreign access is ended (which may be deferred).
+		 */
+		get_page(page);
+		gnttab_end_foreign_access(ref, 0,
+					  (unsigned long)page_address(page));
+		np->grant_rx_ref[id] = GRANT_INVALID_REF;
 
-	if (xfer) {
-		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-			/* Do all the remapping work and M2P updates. */
-			MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
-					 NULL, DOMID_SELF);
-			mcl++;
-			HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
-		}
+		kfree_skb(skb);
 	}
 
-	__skb_queue_purge(&free_list);
-
 	spin_unlock_bh(&np->rx_lock);
 }
 
@@ -1230,6 +1159,15 @@ static netdev_features_t xennet_fix_features(struct net_device *dev,
 		features &= ~NETIF_F_SG;
 	}
 
+	if (features & NETIF_F_IPV6_CSUM) {
+		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
+				 "feature-ipv6-csum-offload", "%d", &val) < 0)
+			val = 0;
+
+		if (!val)
+			features &= ~NETIF_F_IPV6_CSUM;
+	}
+
 	if (features & NETIF_F_TSO) {
 		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
 				 "feature-gso-tcpv4", "%d", &val) < 0)
@@ -1239,6 +1177,15 @@ static netdev_features_t xennet_fix_features(struct net_device *dev,
 			features &= ~NETIF_F_TSO;
 	}
 
+	if (features & NETIF_F_TSO6) {
+		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
+				 "feature-gso-tcpv6", "%d", &val) < 0)
+			val = 0;
+
+		if (!val)
+			features &= ~NETIF_F_TSO6;
+	}
+
 	return features;
 }
 
@@ -1338,6 +1285,12 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 	if (np->stats == NULL)
 		goto exit;
 
+	for_each_possible_cpu(i) {
+		struct netfront_stats *xen_nf_stats;
+		xen_nf_stats = per_cpu_ptr(np->stats, i);
+		u64_stats_init(&xen_nf_stats->syncp);
+	}
+
 	/* Initialise tx_skbs as a free chain containing every entry. */
 	np->tx_skb_freelist = 0;
 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
@@ -1349,6 +1302,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
 		np->rx_skbs[i] = NULL;
 		np->grant_rx_ref[i] = GRANT_INVALID_REF;
+		np->grant_tx_page[i] = NULL;
 	}
 
 	/* A grant for every tx ring slot */
@@ -1371,7 +1325,9 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
 	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
 				  NETIF_F_GSO_ROBUST;
-	netdev->hw_features	= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
+	netdev->hw_features	= NETIF_F_SG |
+				  NETIF_F_IPV6_CSUM |
+				  NETIF_F_TSO | NETIF_F_TSO6;
 
 	/*
 	 * Assume that all hw features are available for now. This set
@@ -1749,6 +1705,19 @@ again:
 		goto abort_transaction;
 	}
 
+	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
+	if (err) {
+		message = "writing feature-gso-tcpv6";
+		goto abort_transaction;
+	}
+
+	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
+			   "1");
+	if (err) {
+		message = "writing feature-ipv6-csum-offload";
+		goto abort_transaction;
+	}
+
 	err = xenbus_transaction_end(xbt, 0);
 	if (err) {
 		if (err == -EAGAIN)
@@ -1863,7 +1832,6 @@ static void netback_changed(struct xenbus_device *dev,
 	case XenbusStateReconfiguring:
 	case XenbusStateReconfigured:
 	case XenbusStateUnknown:
-	case XenbusStateClosed:
 		break;
 
 	case XenbusStateInitWait:
@@ -1878,6 +1846,10 @@ static void netback_changed(struct xenbus_device *dev,
 		netdev_notify_peers(netdev);
 		break;
 
+	case XenbusStateClosed:
+		if (dev->state == XenbusStateClosed)
+			break;
+		/* Missed the backend's CLOSING state -- fallthrough */
 	case XenbusStateClosing:
 		xenbus_frontend_closed(dev);
 		break;
@@ -2106,7 +2078,7 @@ static int __init netif_init(void)
 	if (!xen_domain())
 		return -ENODEV;
 
-	if (xen_hvm_domain() && !xen_platform_pci_unplug)
+	if (!xen_has_pv_nic_devices())
 		return -ENODEV;
 
 	pr_info("Initialising Xen virtual ethernet driver\n");