author	David S. Miller <davem@davemloft.net>	2013-02-07 23:31:47 -0500
committer	David S. Miller <davem@davemloft.net>	2013-02-07 23:31:47 -0500
commit	0c35565b460ff99f973fb4a9ec63fbcb4176d2e6 (patch)
tree	f0293db254c3db8dfee6c624bba307e3308162f7
parent	e21b9d031fa184632c373eedc12e3c296e1aa65b (diff)
parent	b9149729ebdcfce63f853aa54a404c6a8f6ebbf3 (diff)
Merge branch 'netback'
Ian Campbell says:
====================
The Xen netback implementation contains two flaws that allow a guest to
cause a denial of service (DoS) in the backend domain, potentially
affecting other domains in the system.
CVE-2013-0216 is a failure to sanity-check the ring producer/consumer
pointers, which allows a guest to make netback loop for an extended
period, preventing other work from occurring.
CVE-2013-0217 is a guest-triggerable memory leak on an error path.
The following series contains the fixes for these issues, as previously
included in Xen Security Advisory 39:
http://lists.xen.org/archives/html/xen-announce/2013-02/msg00001.html
Changes in v2:
- Typo and block comment format fixes
- Added stable Cc
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
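
The core of the CVE-2013-0216 fix is a single sanity check on the shared
ring before any requests are consumed. For readers unfamiliar with the Xen
ring convention, the sketch below illustrates why the check works; it is an
illustration only, and RING_SIZE, struct ring, and ring_sane() are
simplified stand-ins rather than the real Xen shared-ring macros.

#include <stdint.h>
#include <stdbool.h>

#define RING_SIZE 256u	/* hypothetical; the real size comes from the ring macros */

/* Model of a shared ring: the producer index lives in guest-writable
 * memory, while the consumer index is private to the backend. */
struct ring {
	uint32_t req_prod;	/* written by the (untrusted) frontend */
	uint32_t req_cons;	/* advanced only by the backend */
};

/* Xen ring indices are free-running and rely on unsigned wraparound,
 * so req_prod - req_cons is always the number of outstanding requests.
 * A well-behaved frontend can never have more than RING_SIZE requests
 * in flight; anything larger means the producer index is garbage, and
 * a backend that trusted it could spin over bogus entries for a very
 * long time -- the DoS this series prevents. */
static bool ring_sane(const struct ring *r)
{
	return r->req_prod - r->req_cons <= RING_SIZE;
}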
-rw-r--r--	drivers/net/xen-netback/common.h	  3
-rw-r--r--	drivers/net/xen-netback/interface.c	 23
-rw-r--r--	drivers/net/xen-netback/netback.c	115
3 files changed, 88 insertions(+), 53 deletions(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 94b79c3338c4..9d7f1723dd8f 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
 /* Notify xenvif that ring now has space to send an skb to the frontend */
 void xenvif_notify_tx_completion(struct xenvif *vif);
 
+/* Prevent the device from generating any further traffic. */
+void xenvif_carrier_off(struct xenvif *vif);
+
 /* Returns number of ring slots required to send an skb to the frontend */
 unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
 
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index b7d41f8c338a..b8c5193bd420 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -343,17 +343,22 @@ err:
 	return err;
 }
 
-void xenvif_disconnect(struct xenvif *vif)
+void xenvif_carrier_off(struct xenvif *vif)
 {
 	struct net_device *dev = vif->dev;
-	if (netif_carrier_ok(dev)) {
-		rtnl_lock();
-		netif_carrier_off(dev); /* discard queued packets */
-		if (netif_running(dev))
-			xenvif_down(vif);
-		rtnl_unlock();
-		xenvif_put(vif);
-	}
+
+	rtnl_lock();
+	netif_carrier_off(dev); /* discard queued packets */
+	if (netif_running(dev))
+		xenvif_down(vif);
+	rtnl_unlock();
+	xenvif_put(vif);
+}
+
+void xenvif_disconnect(struct xenvif *vif)
+{
+	if (netif_carrier_ok(vif->dev))
+		xenvif_carrier_off(vif);
 
 	atomic_dec(&vif->refcnt);
 	wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
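
Splitting xenvif_carrier_off() out of xenvif_disconnect() above is what
lets the netback.c changes below take a misbehaving interface offline
immediately, instead of only at teardown. A minimal userspace model of the
resulting convention, assuming a simplified vif with an explicit carrier
flag (the kernel tracks this through netif_carrier_ok() and friends):

#include <stdbool.h>
#include <stdio.h>

struct vif_model {
	bool carrier_ok;	/* stands in for netif_carrier_ok(vif->dev) */
};

/* Models netbk_fatal_tx_err(): once a ring is declared bad, the
 * backend drops the carrier and never produces another response
 * for that interface. */
static void fatal_tx_err(struct vif_model *vif)
{
	fprintf(stderr, "fatal error; disabling device\n");
	vif->carrier_ok = false;	/* models xenvif_carrier_off() */
}

/* The worker loop then simply skips interfaces whose carrier is
 * down, so a hostile frontend cannot keep the backend busy. */
static bool vif_schedulable(const struct vif_model *vif)
{
	return vif->carrier_ok;
}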
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f2d6b78d901d..2b9520c46e97 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -147,7 +147,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)
 	atomic_dec(&netbk->netfront_count);
 }
 
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+				  u8 status);
 static void make_tx_response(struct xenvif *vif,
 			     struct xen_netif_tx_request *txp,
 			     s8 st);
@@ -879,7 +880,7 @@ static void netbk_tx_err(struct xenvif *vif,
 
 	do {
 		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-		if (cons >= end)
+		if (cons == end)
 			break;
 		txp = RING_GET_REQUEST(&vif->tx, cons++);
 	} while (1);
@@ -888,6 +889,13 @@ static void netbk_tx_err(struct xenvif *vif,
 	xenvif_put(vif);
 }
 
+static void netbk_fatal_tx_err(struct xenvif *vif)
+{
+	netdev_err(vif->dev, "fatal error; disabling device\n");
+	xenvif_carrier_off(vif);
+	xenvif_put(vif);
+}
+
 static int netbk_count_requests(struct xenvif *vif,
 				struct xen_netif_tx_request *first,
 				struct xen_netif_tx_request *txp,
@@ -901,19 +909,22 @@ static int netbk_count_requests(struct xenvif *vif,
 
 	do {
 		if (frags >= work_to_do) {
-			netdev_dbg(vif->dev, "Need more frags\n");
+			netdev_err(vif->dev, "Need more frags\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 
 		if (unlikely(frags >= MAX_SKB_FRAGS)) {
-			netdev_dbg(vif->dev, "Too many frags\n");
+			netdev_err(vif->dev, "Too many frags\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 
 		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
 		       sizeof(*txp));
 		if (txp->size > first->size) {
-			netdev_dbg(vif->dev, "Frags galore\n");
+			netdev_err(vif->dev, "Frag is bigger than frame.\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 
@@ -921,8 +932,9 @@ static int netbk_count_requests(struct xenvif *vif,
 		frags++;
 
 		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
-			netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
-				   txp->offset, txp->size);
+			netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
+				   txp->offset, txp->size);
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 	} while ((txp++)->flags & XEN_NETTXF_more_data);
@@ -966,7 +978,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 		pending_idx = netbk->pending_ring[index];
 		page = xen_netbk_alloc_page(netbk, skb, pending_idx);
 		if (!page)
-			return NULL;
+			goto err;
 
 		gop->source.u.ref = txp->gref;
 		gop->source.domid = vif->domid;
@@ -988,6 +1000,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 	}
 
 	return gop;
+err:
+	/* Unwind, freeing all pages and sending error responses. */
+	while (i-- > start) {
+		xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
+				      XEN_NETIF_RSP_ERROR);
+	}
+	/* The head too, if necessary. */
+	if (start)
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+
+	return NULL;
 }
 
 static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
@@ -996,30 +1019,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 {
 	struct gnttab_copy *gop = *gopp;
 	u16 pending_idx = *((u16 *)skb->data);
-	struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
-	struct xenvif *vif = pending_tx_info[pending_idx].vif;
-	struct xen_netif_tx_request *txp;
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	int nr_frags = shinfo->nr_frags;
 	int i, err, start;
 
 	/* Check status of header. */
 	err = gop->status;
-	if (unlikely(err)) {
-		pending_ring_idx_t index;
-		index = pending_index(netbk->pending_prod++);
-		txp = &pending_tx_info[pending_idx].req;
-		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-		netbk->pending_ring[index] = pending_idx;
-		xenvif_put(vif);
-	}
+	if (unlikely(err))
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
 
 	/* Skip first skb fragment if it is on same page as header fragment. */
 	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
 
 	for (i = start; i < nr_frags; i++) {
 		int j, newerr;
-		pending_ring_idx_t index;
 
 		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
 
@@ -1028,16 +1041,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 		if (likely(!newerr)) {
 			/* Had a previous error? Invalidate this fragment. */
 			if (unlikely(err))
-				xen_netbk_idx_release(netbk, pending_idx);
+				xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 			continue;
 		}
 
 		/* Error on this fragment: respond to client with an error. */
-		txp = &netbk->pending_tx_info[pending_idx].req;
-		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-		index = pending_index(netbk->pending_prod++);
-		netbk->pending_ring[index] = pending_idx;
-		xenvif_put(vif);
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
 
 		/* Not the first error? Preceding frags already invalidated. */
 		if (err)
@@ -1045,10 +1054,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 
 		/* First error: invalidate header and preceding fragments. */
 		pending_idx = *((u16 *)skb->data);
-		xen_netbk_idx_release(netbk, pending_idx);
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 		for (j = start; j < i; j++) {
 			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
-			xen_netbk_idx_release(netbk, pending_idx);
+			xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 		}
 
 		/* Remember the error: invalidate all subsequent fragments. */
@@ -1082,7 +1091,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
 
 		/* Take an extra reference to offset xen_netbk_idx_release */
 		get_page(netbk->mmap_pages[pending_idx]);
-		xen_netbk_idx_release(netbk, pending_idx);
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 	}
 }
 
@@ -1095,7 +1104,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,
 
 	do {
 		if (unlikely(work_to_do-- <= 0)) {
-			netdev_dbg(vif->dev, "Missing extra info\n");
+			netdev_err(vif->dev, "Missing extra info\n");
+			netbk_fatal_tx_err(vif);
 			return -EBADR;
 		}
 
@@ -1104,8 +1114,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
 		if (unlikely(!extra.type ||
 			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 			vif->tx.req_cons = ++cons;
-			netdev_dbg(vif->dev,
+			netdev_err(vif->dev,
 				   "Invalid extra type: %d\n", extra.type);
+			netbk_fatal_tx_err(vif);
 			return -EINVAL;
 		}
 
@@ -1121,13 +1132,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
 			     struct xen_netif_extra_info *gso)
 {
 	if (!gso->u.gso.size) {
-		netdev_dbg(vif->dev, "GSO size must not be zero.\n");
+		netdev_err(vif->dev, "GSO size must not be zero.\n");
+		netbk_fatal_tx_err(vif);
 		return -EINVAL;
 	}
 
 	/* Currently only TCPv4 S.O. is supported. */
 	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
-		netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+		netbk_fatal_tx_err(vif);
 		return -EINVAL;
 	}
 
@@ -1264,9 +1277,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
 		/* Get a netif from the list with work to do. */
 		vif = poll_net_schedule_list(netbk);
+		/* This can sometimes happen because the test of
+		 * list_empty(net_schedule_list) at the top of the
+		 * loop is unlocked. Just go back and have another
+		 * look.
+		 */
 		if (!vif)
 			continue;
 
+		if (vif->tx.sring->req_prod - vif->tx.req_cons >
+		    XEN_NETIF_TX_RING_SIZE) {
+			netdev_err(vif->dev,
+				   "Impossible number of requests. "
+				   "req_prod %d, req_cons %d, size %ld\n",
+				   vif->tx.sring->req_prod, vif->tx.req_cons,
+				   XEN_NETIF_TX_RING_SIZE);
+			netbk_fatal_tx_err(vif);
+			continue;
+		}
+
 		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
 		if (!work_to_do) {
 			xenvif_put(vif);
@@ -1294,17 +1323,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 			work_to_do = xen_netbk_get_extras(vif, extras,
 							  work_to_do);
 			idx = vif->tx.req_cons;
-			if (unlikely(work_to_do < 0)) {
-				netbk_tx_err(vif, &txreq, idx);
+			if (unlikely(work_to_do < 0))
 				continue;
-			}
 		}
 
 		ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
-		if (unlikely(ret < 0)) {
-			netbk_tx_err(vif, &txreq, idx - ret);
+		if (unlikely(ret < 0))
 			continue;
-		}
+
 		idx += ret;
 
 		if (unlikely(txreq.size < ETH_HLEN)) {
@@ -1316,11 +1342,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
 		/* No crossing a page as the payload mustn't fragment. */
 		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
-			netdev_dbg(vif->dev,
+			netdev_err(vif->dev,
 				   "txreq.offset: %x, size: %u, end: %lu\n",
 				   txreq.offset, txreq.size,
 				   (txreq.offset&~PAGE_MASK) + txreq.size);
-			netbk_tx_err(vif, &txreq, idx);
+			netbk_fatal_tx_err(vif);
 			continue;
 		}
 
@@ -1348,8 +1374,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
 
 			if (netbk_set_skb_gso(vif, skb, gso)) {
+				/* Failure in netbk_set_skb_gso is fatal. */
 				kfree_skb(skb);
-				netbk_tx_err(vif, &txreq, idx);
 				continue;
 			}
 		}
@@ -1448,7 +1474,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
 			txp->size -= data_len;
 		} else {
 			/* Schedule a response immediately. */
-			xen_netbk_idx_release(netbk, pending_idx);
+			xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 		}
 
 		if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1500,7 +1526,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)
 	xen_netbk_tx_submit(netbk);
 }
 
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+				  u8 status)
 {
 	struct xenvif *vif;
 	struct pending_tx_info *pending_tx_info;
@@ -1514,7 +1541,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
 
 	vif = pending_tx_info->vif;
 
-	make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
+	make_tx_response(vif, &pending_tx_info->req, status);
 
 	index = pending_index(netbk->pending_prod++);
 	netbk->pending_ring[index] = pending_idx;
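
Finally, the CVE-2013-0217 change in xen_netbk_get_requests() is an
instance of the standard unwind-on-error idiom: the bare return NULL that
leaked already-granted pages becomes a goto err that walks back over
everything acquired so far. A self-contained sketch of the same shape, with
acquire()/release() as hypothetical stand-ins for page allocation and
xen_netbk_idx_release():

#include <stdlib.h>

static void *acquire(int slot) { (void)slot; return malloc(64); }
static void release(void *res) { free(res); }

static int acquire_all(void **res, int start, int n)
{
	int i;

	for (i = start; i < n; i++) {
		res[i] = acquire(i);
		if (!res[i])
			goto err;	/* a bare "return -1" here would leak res[start..i-1] */
	}
	return 0;
err:
	/* Unwind, releasing exactly the slots acquired before the
	 * failure, in reverse order -- the "while (i-- > start)"
	 * pattern from the patch. */
	while (i-- > start)
		release(res[i]);
	return -1;
}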