-rw-r--r--	drivers/net/xen-netback/common.h    |  3
-rw-r--r--	drivers/net/xen-netback/interface.c | 23
-rw-r--r--	drivers/net/xen-netback/netback.c   | 62
3 files changed, 62 insertions(+), 26 deletions(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 94b79c3338c4..9d7f1723dd8f 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
 /* Notify xenvif that ring now has space to send an skb to the frontend */
 void xenvif_notify_tx_completion(struct xenvif *vif);
 
+/* Prevent the device from generating any further traffic. */
+void xenvif_carrier_off(struct xenvif *vif);
+
 /* Returns number of ring slots required to send an skb to the frontend */
 unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
 
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index b7d41f8c338a..b8c5193bd420 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -343,17 +343,22 @@ err:
 	return err;
 }
 
-void xenvif_disconnect(struct xenvif *vif)
+void xenvif_carrier_off(struct xenvif *vif)
 {
 	struct net_device *dev = vif->dev;
-	if (netif_carrier_ok(dev)) {
-		rtnl_lock();
-		netif_carrier_off(dev); /* discard queued packets */
-		if (netif_running(dev))
-			xenvif_down(vif);
-		rtnl_unlock();
-		xenvif_put(vif);
-	}
+
+	rtnl_lock();
+	netif_carrier_off(dev); /* discard queued packets */
+	if (netif_running(dev))
+		xenvif_down(vif);
+	rtnl_unlock();
+	xenvif_put(vif);
+}
+
+void xenvif_disconnect(struct xenvif *vif)
+{
+	if (netif_carrier_ok(vif->dev))
+		xenvif_carrier_off(vif);
 
 	atomic_dec(&vif->refcnt);
 	wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
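
For illustration, a minimal standalone C sketch of the shape of this refactor (not from the kernel tree; the mock_* names, struct, and printf calls are invented, and the real code additionally holds rtnl_lock and manages the vif refcount): the carrier-down work becomes its own function, and the netif_carrier_ok() test moves to the caller so a fatal-error path can take the carrier down directly.

#include <stdbool.h>
#include <stdio.h>

struct mock_vif {
	bool carrier_ok;
};

/* Stands in for the new xenvif_carrier_off(): unconditionally takes
 * the carrier down. */
static void mock_carrier_off(struct mock_vif *vif)
{
	vif->carrier_ok = false;
	printf("carrier off; queued packets discarded\n");
}

/* Stands in for xenvif_disconnect(): the carrier check now lives in
 * the caller, so the fatal-error path in netback.c can call the
 * carrier-off helper directly. */
static void mock_disconnect(struct mock_vif *vif)
{
	if (vif->carrier_ok)
		mock_carrier_off(vif);
}

int main(void)
{
	struct mock_vif vif = { .carrier_ok = true };
	mock_disconnect(&vif);
	return 0;
}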
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f2d6b78d901d..c2e3336b4f98 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -888,6 +888,13 @@ static void netbk_tx_err(struct xenvif *vif,
 	xenvif_put(vif);
 }
 
+static void netbk_fatal_tx_err(struct xenvif *vif)
+{
+	netdev_err(vif->dev, "fatal error; disabling device\n");
+	xenvif_carrier_off(vif);
+	xenvif_put(vif);
+}
+
 static int netbk_count_requests(struct xenvif *vif,
 				struct xen_netif_tx_request *first,
 				struct xen_netif_tx_request *txp,
@@ -901,19 +908,22 @@ static int netbk_count_requests(struct xenvif *vif,
 
 	do {
 		if (frags >= work_to_do) {
-			netdev_dbg(vif->dev, "Need more frags\n");
+			netdev_err(vif->dev, "Need more frags\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 
 		if (unlikely(frags >= MAX_SKB_FRAGS)) {
-			netdev_dbg(vif->dev, "Too many frags\n");
+			netdev_err(vif->dev, "Too many frags\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 
 		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
 		       sizeof(*txp));
 		if (txp->size > first->size) {
-			netdev_dbg(vif->dev, "Frags galore\n");
+			netdev_err(vif->dev, "Frag is bigger than frame.\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 
@@ -921,8 +931,9 @@ static int netbk_count_requests(struct xenvif *vif,
 		frags++;
 
 		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
-			netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
+			netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
 				   txp->offset, txp->size);
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 	} while ((txp++)->flags & XEN_NETTXF_more_data);
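
A note on the error convention these hunks preserve: on failure, netbk_count_requests() returns the negated number of fragment slots it has walked (-frags), so a negative return still encodes how many ring slots the bad request consumed. A standalone sketch (the index and values are invented) of what the caller previously recovered with idx - ret:

#include <stdio.h>

int main(void)
{
	int idx = 100;	/* hypothetical consumer index at the request */
	int ret = -3;	/* netbk_count_requests() failed after 3 slots */

	if (ret < 0) {
		/* idx - ret == idx + frags: the first slot past the
		 * bad request. Before this patch the caller used it
		 * to respond to every consumed slot via
		 * netbk_tx_err(vif, &txreq, idx - ret); now the
		 * callee shuts the ring down with
		 * netbk_fatal_tx_err() instead. */
		printf("slots %d..%d consumed by the bad request\n",
		       idx, idx - ret - 1);
	}
	return 0;
}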
@@ -1095,7 +1106,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,
 
 	do {
 		if (unlikely(work_to_do-- <= 0)) {
-			netdev_dbg(vif->dev, "Missing extra info\n");
+			netdev_err(vif->dev, "Missing extra info\n");
+			netbk_fatal_tx_err(vif);
 			return -EBADR;
 		}
 
@@ -1104,8 +1116,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
 		if (unlikely(!extra.type ||
 			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 			vif->tx.req_cons = ++cons;
-			netdev_dbg(vif->dev,
+			netdev_err(vif->dev,
 				   "Invalid extra type: %d\n", extra.type);
+			netbk_fatal_tx_err(vif);
 			return -EINVAL;
 		}
 
@@ -1121,13 +1134,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
 			     struct xen_netif_extra_info *gso)
 {
 	if (!gso->u.gso.size) {
-		netdev_dbg(vif->dev, "GSO size must not be zero.\n");
+		netdev_err(vif->dev, "GSO size must not be zero.\n");
+		netbk_fatal_tx_err(vif);
 		return -EINVAL;
 	}
 
 	/* Currently only TCPv4 S.O. is supported. */
 	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
-		netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+		netbk_fatal_tx_err(vif);
 		return -EINVAL;
 	}
 
@@ -1264,9 +1279,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
 		/* Get a netif from the list with work to do. */
 		vif = poll_net_schedule_list(netbk);
+		/* This can sometimes happen because the test of
+		 * list_empty(net_schedule_list) at the top of the
+		 * loop is unlocked. Just go back and have another
+		 * look.
+		 */
 		if (!vif)
 			continue;
 
+		if (vif->tx.sring->req_prod - vif->tx.req_cons >
+		    XEN_NETIF_TX_RING_SIZE) {
+			netdev_err(vif->dev,
+				   "Impossible number of requests. "
+				   "req_prod %d, req_cons %d, size %ld\n",
+				   vif->tx.sring->req_prod, vif->tx.req_cons,
+				   XEN_NETIF_TX_RING_SIZE);
+			netbk_fatal_tx_err(vif);
+			continue;
+		}
+
 		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
 		if (!work_to_do) {
 			xenvif_put(vif);
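
The new sanity check relies on unsigned wraparound arithmetic: req_prod and req_cons are free-running counters, so their difference is the number of outstanding requests even after the counters wrap past 2^32, and a difference larger than the ring size is impossible for a well-behaved frontend. A standalone sketch of that arithmetic (the ring size and counter values here are invented):

#include <stdio.h>

#define RING_SIZE 256u	/* hypothetical ring size; a power of two */

int main(void)
{
	unsigned int req_cons = 0xfffffffbu;	/* consumer near 2^32 */
	unsigned int good_prod = 5;		/* wrapped producer */
	unsigned int bogus_prod = 0x12345678u;	/* garbage from frontend */

	/* Unsigned subtraction is modulo 2^32, so a wrapped but honest
	 * producer still yields the true count: 5 - 0xfffffffb == 10. */
	printf("good: %u outstanding\n", good_prod - req_cons);

	/* A buggy or malicious frontend can publish any req_prod it
	 * likes; more outstanding requests than ring slots cannot
	 * happen legitimately, hence the fatal error. */
	if (bogus_prod - req_cons > RING_SIZE)
		printf("bogus: impossible number of requests -> fatal\n");
	return 0;
}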
@@ -1294,17 +1325,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 			work_to_do = xen_netbk_get_extras(vif, extras,
 							  work_to_do);
 			idx = vif->tx.req_cons;
-			if (unlikely(work_to_do < 0)) {
-				netbk_tx_err(vif, &txreq, idx);
+			if (unlikely(work_to_do < 0))
 				continue;
-			}
 		}
 
 		ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
-		if (unlikely(ret < 0)) {
-			netbk_tx_err(vif, &txreq, idx - ret);
+		if (unlikely(ret < 0))
 			continue;
-		}
+
 		idx += ret;
 
 		if (unlikely(txreq.size < ETH_HLEN)) {
@@ -1316,11 +1344,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
 		/* No crossing a page as the payload mustn't fragment. */
 		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
-			netdev_dbg(vif->dev,
+			netdev_err(vif->dev,
 				   "txreq.offset: %x, size: %u, end: %lu\n",
 				   txreq.offset, txreq.size,
 				   (txreq.offset&~PAGE_MASK) + txreq.size);
-			netbk_tx_err(vif, &txreq, idx);
+			netbk_fatal_tx_err(vif);
 			continue;
 		}
 
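
For reference, a standalone sketch of the page-crossing check in the hunk above (the PAGE_SIZE/PAGE_MASK definitions and request values are invented here; the kernel gets them from its own headers):

#include <stdio.h>

#define PAGE_SIZE 4096ul
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned int offset = 4000, size = 200;	/* hypothetical request */

	/* 4000 + 200 = 4200 ends past the 4096-byte page: fatal. The
	 * message reports the in-page end offset, as the hunk above
	 * does with (txreq.offset & ~PAGE_MASK) + txreq.size. */
	if (offset + size > PAGE_SIZE)
		printf("offset: %x, size: %u, end: %lu -> fatal\n",
		       offset, size, (offset & ~PAGE_MASK) + size);
	return 0;
}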
@@ -1348,8 +1376,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
 
 			if (netbk_set_skb_gso(vif, skb, gso)) {
+				/* Failure in netbk_set_skb_gso is fatal. */
 				kfree_skb(skb);
-				netbk_tx_err(vif, &txreq, idx);
 				continue;
 			}
 		}