Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r--	drivers/net/xen-netfront.c	192
1 file changed, 78 insertions(+), 114 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e59acb1daa23..e30d80033cbc 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -117,6 +117,7 @@ struct netfront_info {
 	} tx_skbs[NET_TX_RING_SIZE];
 	grant_ref_t gref_tx_head;
 	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
+	struct page *grant_tx_page[NET_TX_RING_SIZE];
 	unsigned tx_skb_freelist;
 
 	spinlock_t rx_lock ____cacheline_aligned_in_smp;
@@ -396,6 +397,7 @@ static void xennet_tx_buf_gc(struct net_device *dev)
 			gnttab_release_grant_reference(
 				&np->gref_tx_head, np->grant_tx_ref[id]);
 			np->grant_tx_ref[id] = GRANT_INVALID_REF;
+			np->grant_tx_page[id] = NULL;
 			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
 			dev_kfree_skb_irq(skb);
 		}
@@ -452,6 +454,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 	gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
 					mfn, GNTMAP_readonly);
 
+	np->grant_tx_page[id] = virt_to_page(data);
 	tx->gref = np->grant_tx_ref[id] = ref;
 	tx->offset = offset;
 	tx->size = len;
@@ -497,6 +500,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 						np->xbdev->otherend_id,
 						mfn, GNTMAP_readonly);
 
+		np->grant_tx_page[id] = page;
 		tx->gref = np->grant_tx_ref[id] = ref;
 		tx->offset = offset;
 		tx->size = bytes;
@@ -596,6 +600,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	mfn = virt_to_mfn(data);
 	gnttab_grant_foreign_access_ref(
 		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
+	np->grant_tx_page[id] = virt_to_page(data);
 	tx->gref = np->grant_tx_ref[id] = ref;
 	tx->offset = offset;
 	tx->size = len;
@@ -617,7 +622,9 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		tx->flags |= XEN_NETTXF_extra_info;
 
 		gso->u.gso.size = skb_shinfo(skb)->gso_size;
-		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
+		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
+			XEN_NETIF_GSO_TYPE_TCPV6 :
+			XEN_NETIF_GSO_TYPE_TCPV4;
 		gso->u.gso.pad = 0;
 		gso->u.gso.features = 0;
 
@@ -809,15 +816,18 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
 		return -EINVAL;
 	}
 
-	/* Currently only TCPv4 S.O. is supported. */
-	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
+	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
 		if (net_ratelimit())
 			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
 		return -EINVAL;
 	}
 
 	skb_shinfo(skb)->gso_size = gso->u.gso.size;
-	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+	skb_shinfo(skb)->gso_type =
+		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
+		SKB_GSO_TCPV4 :
+		SKB_GSO_TCPV6;
 
 	/* Header must be checked, and gso_segs computed. */
 	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
@@ -859,9 +869,7 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
 
 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 {
-	struct iphdr *iph;
-	int err = -EPROTO;
-	int recalculate_partial_csum = 0;
+	bool recalculate_partial_csum = false;
 
 	/*
 	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
@@ -873,54 +881,14 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 		struct netfront_info *np = netdev_priv(dev);
 		np->rx_gso_checksum_fixup++;
 		skb->ip_summed = CHECKSUM_PARTIAL;
-		recalculate_partial_csum = 1;
+		recalculate_partial_csum = true;
 	}
 
 	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
 		return 0;
 
-	if (skb->protocol != htons(ETH_P_IP))
-		goto out;
-
-	iph = (void *)skb->data;
-
-	switch (iph->protocol) {
-	case IPPROTO_TCP:
-		if (!skb_partial_csum_set(skb, 4 * iph->ihl,
-					  offsetof(struct tcphdr, check)))
-			goto out;
-
-		if (recalculate_partial_csum) {
-			struct tcphdr *tcph = tcp_hdr(skb);
-			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-							 skb->len - iph->ihl*4,
-							 IPPROTO_TCP, 0);
-		}
-		break;
-	case IPPROTO_UDP:
-		if (!skb_partial_csum_set(skb, 4 * iph->ihl,
-					  offsetof(struct udphdr, check)))
-			goto out;
-
-		if (recalculate_partial_csum) {
-			struct udphdr *udph = udp_hdr(skb);
-			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-							 skb->len - iph->ihl*4,
-							 IPPROTO_UDP, 0);
-		}
-		break;
-	default:
-		if (net_ratelimit())
-			pr_err("Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
-			       iph->protocol);
-		goto out;
-	}
-
-	err = 0;
-
-out:
-	return err;
+	return skb_checksum_setup(skb, recalculate_partial_csum);
 }
 
 static int handle_incoming_queue(struct net_device *dev,
@@ -939,6 +907,7 @@ static int handle_incoming_queue(struct net_device *dev,
 
 		/* Ethernet work: Delayed to here as it peeks the header. */
 		skb->protocol = eth_type_trans(skb, dev);
+		skb_reset_network_header(skb);
 
 		if (checksum_setup(dev, skb)) {
 			kfree_skb(skb);
@@ -1122,10 +1091,11 @@ static void xennet_release_tx_bufs(struct netfront_info *np)
 			continue;
 
 		skb = np->tx_skbs[i].skb;
-		gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
-					      GNTMAP_readonly);
-		gnttab_release_grant_reference(&np->gref_tx_head,
-					       np->grant_tx_ref[i]);
+		get_page(np->grant_tx_page[i]);
+		gnttab_end_foreign_access(np->grant_tx_ref[i],
+					  GNTMAP_readonly,
+					  (unsigned long)page_address(np->grant_tx_page[i]));
+		np->grant_tx_page[i] = NULL;
 		np->grant_tx_ref[i] = GRANT_INVALID_REF;
 		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
 		dev_kfree_skb_irq(skb);
@@ -1134,78 +1104,35 @@ static void xennet_release_tx_bufs(struct netfront_info *np)
 
 static void xennet_release_rx_bufs(struct netfront_info *np)
 {
-	struct mmu_update *mmu = np->rx_mmu;
-	struct multicall_entry *mcl = np->rx_mcl;
-	struct sk_buff_head free_list;
-	struct sk_buff *skb;
-	unsigned long mfn;
-	int xfer = 0, noxfer = 0, unused = 0;
 	int id, ref;
 
-	dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
-		 __func__);
-	return;
-
-	skb_queue_head_init(&free_list);
-
 	spin_lock_bh(&np->rx_lock);
 
 	for (id = 0; id < NET_RX_RING_SIZE; id++) {
-		ref = np->grant_rx_ref[id];
-		if (ref == GRANT_INVALID_REF) {
-			unused++;
-			continue;
-		}
+		struct sk_buff *skb;
+		struct page *page;
 
 		skb = np->rx_skbs[id];
-		mfn = gnttab_end_foreign_transfer_ref(ref);
-		gnttab_release_grant_reference(&np->gref_rx_head, ref);
-		np->grant_rx_ref[id] = GRANT_INVALID_REF;
-
-		if (0 == mfn) {
-			skb_shinfo(skb)->nr_frags = 0;
-			dev_kfree_skb(skb);
-			noxfer++;
+		if (!skb)
 			continue;
-		}
 
-		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-			/* Remap the page. */
-			const struct page *page =
-				skb_frag_page(&skb_shinfo(skb)->frags[0]);
-			unsigned long pfn = page_to_pfn(page);
-			void *vaddr = page_address(page);
-
-			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
-						mfn_pte(mfn, PAGE_KERNEL),
-						0);
-			mcl++;
-			mmu->ptr = ((u64)mfn << PAGE_SHIFT)
-				| MMU_MACHPHYS_UPDATE;
-			mmu->val = pfn;
-			mmu++;
+		ref = np->grant_rx_ref[id];
+		if (ref == GRANT_INVALID_REF)
+			continue;
 
-			set_phys_to_machine(pfn, mfn);
-		}
-		__skb_queue_tail(&free_list, skb);
-		xfer++;
-	}
+		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
 
-	dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
-		 __func__, xfer, noxfer, unused);
+		/* gnttab_end_foreign_access() needs a page ref until
+		 * foreign access is ended (which may be deferred).
+		 */
+		get_page(page);
+		gnttab_end_foreign_access(ref, 0,
+					  (unsigned long)page_address(page));
+		np->grant_rx_ref[id] = GRANT_INVALID_REF;
 
-	if (xfer) {
-		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-			/* Do all the remapping work and M2P updates. */
-			MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
-					 NULL, DOMID_SELF);
-			mcl++;
-			HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
-		}
+		kfree_skb(skb);
 	}
 
-	__skb_queue_purge(&free_list);
-
 	spin_unlock_bh(&np->rx_lock);
 }
1211 1138
@@ -1233,6 +1160,15 @@ static netdev_features_t xennet_fix_features(struct net_device *dev,
 		features &= ~NETIF_F_SG;
 	}
 
+	if (features & NETIF_F_IPV6_CSUM) {
+		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
+				 "feature-ipv6-csum-offload", "%d", &val) < 0)
+			val = 0;
+
+		if (!val)
+			features &= ~NETIF_F_IPV6_CSUM;
+	}
+
 	if (features & NETIF_F_TSO) {
 		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
 				 "feature-gso-tcpv4", "%d", &val) < 0)
@@ -1242,6 +1178,15 @@ static netdev_features_t xennet_fix_features(struct net_device *dev,
 			features &= ~NETIF_F_TSO;
 	}
 
+	if (features & NETIF_F_TSO6) {
+		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
+				 "feature-gso-tcpv6", "%d", &val) < 0)
+			val = 0;
+
+		if (!val)
+			features &= ~NETIF_F_TSO6;
+	}
+
 	return features;
 }
 
@@ -1358,6 +1303,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
 		np->rx_skbs[i] = NULL;
 		np->grant_rx_ref[i] = GRANT_INVALID_REF;
+		np->grant_tx_page[i] = NULL;
 	}
 
 	/* A grant for every tx ring slot */
@@ -1380,7 +1326,9 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
 	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
 				  NETIF_F_GSO_ROBUST;
-	netdev->hw_features	= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
+	netdev->hw_features	= NETIF_F_SG |
+				  NETIF_F_IPV6_CSUM |
+				  NETIF_F_TSO | NETIF_F_TSO6;
 
 	/*
 	 * Assume that all hw features are available for now. This set
@@ -1758,6 +1706,19 @@ again:
 		goto abort_transaction;
 	}
 
+	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
+	if (err) {
+		message = "writing feature-gso-tcpv6";
+		goto abort_transaction;
+	}
+
+	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
+			   "1");
+	if (err) {
+		message = "writing feature-ipv6-csum-offload";
+		goto abort_transaction;
+	}
+
 	err = xenbus_transaction_end(xbt, 0);
 	if (err) {
 		if (err == -EAGAIN)
@@ -1872,7 +1833,6 @@ static void netback_changed(struct xenbus_device *dev,
 	case XenbusStateReconfiguring:
 	case XenbusStateReconfigured:
 	case XenbusStateUnknown:
-	case XenbusStateClosed:
 		break;
 
 	case XenbusStateInitWait:
@@ -1887,6 +1847,10 @@ static void netback_changed(struct xenbus_device *dev,
 		netdev_notify_peers(netdev);
 		break;
 
+	case XenbusStateClosed:
+		if (dev->state == XenbusStateClosed)
+			break;
+		/* Missed the backend's CLOSING state -- fallthrough */
 	case XenbusStateClosing:
 		xenbus_frontend_closed(dev);
 		break;
@@ -2115,7 +2079,7 @@ static int __init netif_init(void)
 	if (!xen_domain())
 		return -ENODEV;
 
-	if (xen_hvm_domain() && !xen_platform_pci_unplug)
+	if (!xen_has_pv_nic_devices())
 		return -ENODEV;
 
 	pr_info("Initialising Xen virtual ethernet driver\n");