Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r-- | drivers/net/xen-netback/netback.c | 122 |
1 file changed, 69 insertions, 53 deletions
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 4bb78862afc8..99c8f09b4654 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -940,35 +940,37 @@ static inline void xenvif_grant_handle_reset(struct xenvif *vif, | |||
940 | 940 | ||
941 | static int xenvif_tx_check_gop(struct xenvif *vif, | 941 | static int xenvif_tx_check_gop(struct xenvif *vif, |
942 | struct sk_buff *skb, | 942 | struct sk_buff *skb, |
943 | struct gnttab_map_grant_ref **gopp_map) | 943 | struct gnttab_map_grant_ref **gopp_map, |
944 | struct gnttab_copy **gopp_copy) | ||
944 | { | 945 | { |
945 | struct gnttab_map_grant_ref *gop_map = *gopp_map; | 946 | struct gnttab_map_grant_ref *gop_map = *gopp_map; |
946 | u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; | 947 | u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; |
947 | struct skb_shared_info *shinfo = skb_shinfo(skb); | 948 | struct skb_shared_info *shinfo = skb_shinfo(skb); |
948 | struct pending_tx_info *tx_info; | ||
949 | int nr_frags = shinfo->nr_frags; | 949 | int nr_frags = shinfo->nr_frags; |
950 | int i, err, start; | 950 | int i, err; |
951 | struct sk_buff *first_skb = NULL; | 951 | struct sk_buff *first_skb = NULL; |
952 | 952 | ||
953 | /* Check status of header. */ | 953 | /* Check status of header. */ |
954 | err = gop_map->status; | 954 | err = (*gopp_copy)->status; |
955 | if (unlikely(err)) | 955 | (*gopp_copy)++; |
956 | if (unlikely(err)) { | ||
957 | if (net_ratelimit()) | ||
958 | netdev_dbg(vif->dev, | ||
959 | "Grant copy of header failed! status: %d pending_idx% %u ref: %u\n", | ||
960 | (*gopp_copy)->status, | ||
961 | pending_idx, | ||
962 | (*gopp_copy)->source.u.ref); | ||
956 | xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); | 963 | xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); |
957 | else | 964 | } |
958 | xenvif_grant_handle_set(vif, pending_idx , gop_map->handle); | ||
959 | |||
960 | /* Skip first skb fragment if it is on same page as header fragment. */ | ||
961 | start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); | ||
962 | 965 | ||
963 | check_frags: | 966 | check_frags: |
964 | for (i = start; i < nr_frags; i++) { | 967 | for (i = 0; i < nr_frags; i++, gop_map++) { |
965 | int j, newerr; | 968 | int j, newerr; |
966 | 969 | ||
967 | pending_idx = frag_get_pending_idx(&shinfo->frags[i]); | 970 | pending_idx = frag_get_pending_idx(&shinfo->frags[i]); |
968 | tx_info = &vif->pending_tx_info[pending_idx]; | ||
969 | 971 | ||
970 | /* Check error status: if okay then remember grant handle. */ | 972 | /* Check error status: if okay then remember grant handle. */ |
971 | newerr = (++gop_map)->status; | 973 | newerr = gop_map->status; |
972 | 974 | ||
973 | if (likely(!newerr)) { | 975 | if (likely(!newerr)) { |
974 | xenvif_grant_handle_set(vif, | 976 | xenvif_grant_handle_set(vif, |
@@ -981,18 +983,20 @@ check_frags: | |||
981 | } | 983 | } |
982 | 984 | ||
983 | /* Error on this fragment: respond to client with an error. */ | 985 | /* Error on this fragment: respond to client with an error. */ |
986 | if (net_ratelimit()) | ||
987 | netdev_dbg(vif->dev, | ||
988 | "Grant map of %d. frag failed! status: %d pending_idx% %u ref: %u\n", | ||
989 | i, | ||
990 | gop_map->status, | ||
991 | pending_idx, | ||
992 | gop_map->ref); | ||
984 | xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); | 993 | xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); |
985 | 994 | ||
986 | /* Not the first error? Preceding frags already invalidated. */ | 995 | /* Not the first error? Preceding frags already invalidated. */ |
987 | if (err) | 996 | if (err) |
988 | continue; | 997 | continue; |
989 | /* First error: invalidate header and preceding fragments. */ | 998 | /* First error: invalidate preceding fragments. */ |
990 | if (!first_skb) | 999 | for (j = 0; j < i; j++) { |
991 | pending_idx = XENVIF_TX_CB(skb)->pending_idx; | ||
992 | else | ||
993 | pending_idx = XENVIF_TX_CB(first_skb)->pending_idx;
994 | xenvif_idx_unmap(vif, pending_idx); | ||
995 | for (j = start; j < i; j++) { | ||
996 | pending_idx = frag_get_pending_idx(&shinfo->frags[j]); | 1000 | pending_idx = frag_get_pending_idx(&shinfo->frags[j]); |
997 | xenvif_idx_unmap(vif, pending_idx); | 1001 | xenvif_idx_unmap(vif, pending_idx); |
998 | } | 1002 | } |
@@ -1006,7 +1010,6 @@ check_frags: | |||
1006 | skb = shinfo->frag_list; | 1010 | skb = shinfo->frag_list; |
1007 | shinfo = skb_shinfo(skb); | 1011 | shinfo = skb_shinfo(skb); |
1008 | nr_frags = shinfo->nr_frags; | 1012 | nr_frags = shinfo->nr_frags; |
1009 | start = 0; | ||
1010 | 1013 | ||
1011 | goto check_frags; | 1014 | goto check_frags; |
1012 | } | 1015 | } |
@@ -1017,15 +1020,13 @@ check_frags: | |||
1017 | if (first_skb && err) { | 1020 | if (first_skb && err) { |
1018 | int j; | 1021 | int j; |
1019 | shinfo = skb_shinfo(first_skb); | 1022 | shinfo = skb_shinfo(first_skb); |
1020 | pending_idx = XENVIF_TX_CB(skb)->pending_idx; | 1023 | for (j = 0; j < shinfo->nr_frags; j++) { |
1021 | start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); | ||
1022 | for (j = start; j < shinfo->nr_frags; j++) { | ||
1023 | pending_idx = frag_get_pending_idx(&shinfo->frags[j]); | 1024 | pending_idx = frag_get_pending_idx(&shinfo->frags[j]); |
1024 | xenvif_idx_unmap(vif, pending_idx); | 1025 | xenvif_idx_unmap(vif, pending_idx); |
1025 | } | 1026 | } |
1026 | } | 1027 | } |
1027 | 1028 | ||
1028 | *gopp_map = gop_map + 1; | 1029 | *gopp_map = gop_map; |
1029 | return err; | 1030 | return err; |
1030 | } | 1031 | } |
1031 | 1032 | ||
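In the new xenvif_tx_check_gop() the header's result comes from a grant-copy operation and only the fragments come from grant-map operations, so the function consumes exactly one gnttab_copy per packet plus one gnttab_map_grant_ref per fragment. A minimal sketch of that consumption pattern, with the driver's release/unmap bookkeeping left out and the helper name check_tx_ops() invented purely for illustration:

#include <xen/interface/grant_table.h>

/* Sketch only: one copy op for the header, one map op per fragment.
 * GNTST_okay (0) means success for both operation types; the first
 * non-zero status encountered is reported.
 */
static int check_tx_ops(struct gnttab_copy **copy_op,
			struct gnttab_map_grant_ref **map_op,
			unsigned int nr_frags)
{
	int err = (*copy_op)->status;		/* header grant-copy result */
	unsigned int i;

	(*copy_op)++;				/* the header used one copy op */
	for (i = 0; i < nr_frags; i++, (*map_op)++)
		if (!err)
			err = (*map_op)->status;	/* fragment grant-map result */

	return err;
}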
@@ -1036,9 +1037,6 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb) | |||
1036 | int i; | 1037 | int i; |
1037 | u16 prev_pending_idx = INVALID_PENDING_IDX; | 1038 | u16 prev_pending_idx = INVALID_PENDING_IDX; |
1038 | 1039 | ||
1039 | if (skb_shinfo(skb)->destructor_arg) | ||
1040 | prev_pending_idx = XENVIF_TX_CB(skb)->pending_idx; | ||
1041 | |||
1042 | for (i = 0; i < nr_frags; i++) { | 1040 | for (i = 0; i < nr_frags; i++) { |
1043 | skb_frag_t *frag = shinfo->frags + i; | 1041 | skb_frag_t *frag = shinfo->frags + i; |
1044 | struct xen_netif_tx_request *txp; | 1042 | struct xen_netif_tx_request *txp; |
@@ -1048,10 +1046,10 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb) | |||
1048 | pending_idx = frag_get_pending_idx(frag); | 1046 | pending_idx = frag_get_pending_idx(frag); |
1049 | 1047 | ||
1050 | /* If this is not the first frag, chain it to the previous*/ | 1048 | /* If this is not the first frag, chain it to the previous*/ |
1051 | if (unlikely(prev_pending_idx == INVALID_PENDING_IDX)) | 1049 | if (prev_pending_idx == INVALID_PENDING_IDX) |
1052 | skb_shinfo(skb)->destructor_arg = | 1050 | skb_shinfo(skb)->destructor_arg = |
1053 | &callback_param(vif, pending_idx); | 1051 | &callback_param(vif, pending_idx); |
1054 | else if (likely(pending_idx != prev_pending_idx)) | 1052 | else |
1055 | callback_param(vif, prev_pending_idx).ctx = | 1053 | callback_param(vif, prev_pending_idx).ctx = |
1056 | &callback_param(vif, pending_idx); | 1054 | &callback_param(vif, pending_idx); |
1057 | 1055 | ||
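Because the header is no longer a mapped slot, xenvif_fill_frags() no longer seeds the zerocopy callback chain from the header: the first mapped fragment anchors skb_shinfo(skb)->destructor_arg and every later slot is linked through the previous slot's ctx pointer. A sketch of just the chaining logic, reusing the driver's own callback_param(), frag_get_pending_idx() and INVALID_PENDING_IDX helpers:

	u16 prev_pending_idx = INVALID_PENDING_IDX;
	int i;

	for (i = 0; i < shinfo->nr_frags; i++) {
		u16 pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

		if (prev_pending_idx == INVALID_PENDING_IDX)
			/* first mapped slot anchors the callback chain */
			skb_shinfo(skb)->destructor_arg =
				&callback_param(vif, pending_idx);
		else
			/* later slots hang off the previous slot's ctx */
			callback_param(vif, prev_pending_idx).ctx =
				&callback_param(vif, pending_idx);

		prev_pending_idx = pending_idx;
	}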
@@ -1191,7 +1189,10 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size) | |||
1191 | return false; | 1189 | return false; |
1192 | } | 1190 | } |
1193 | 1191 | ||
1194 | static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget) | 1192 | static void xenvif_tx_build_gops(struct xenvif *vif, |
1193 | int budget, | ||
1194 | unsigned *copy_ops, | ||
1195 | unsigned *map_ops) | ||
1195 | { | 1196 | { |
1196 | struct gnttab_map_grant_ref *gop = vif->tx_map_ops, *request_gop; | 1197 | struct gnttab_map_grant_ref *gop = vif->tx_map_ops, *request_gop; |
1197 | struct sk_buff *skb; | 1198 | struct sk_buff *skb; |
@@ -1294,22 +1295,36 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget) | |||
1294 | } | 1295 | } |
1295 | } | 1296 | } |
1296 | 1297 | ||
1297 | xenvif_tx_create_map_op(vif, pending_idx, &txreq, gop); | ||
1298 | |||
1299 | gop++; | ||
1300 | |||
1301 | XENVIF_TX_CB(skb)->pending_idx = pending_idx; | 1298 | XENVIF_TX_CB(skb)->pending_idx = pending_idx; |
1302 | 1299 | ||
1303 | __skb_put(skb, data_len); | 1300 | __skb_put(skb, data_len); |
1301 | vif->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref; | ||
1302 | vif->tx_copy_ops[*copy_ops].source.domid = vif->domid; | ||
1303 | vif->tx_copy_ops[*copy_ops].source.offset = txreq.offset; | ||
1304 | |||
1305 | vif->tx_copy_ops[*copy_ops].dest.u.gmfn = | ||
1306 | virt_to_mfn(skb->data); | ||
1307 | vif->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF; | ||
1308 | vif->tx_copy_ops[*copy_ops].dest.offset = | ||
1309 | offset_in_page(skb->data); | ||
1310 | |||
1311 | vif->tx_copy_ops[*copy_ops].len = data_len; | ||
1312 | vif->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref; | ||
1313 | |||
1314 | (*copy_ops)++; | ||
1304 | 1315 | ||
1305 | skb_shinfo(skb)->nr_frags = ret; | 1316 | skb_shinfo(skb)->nr_frags = ret; |
1306 | if (data_len < txreq.size) { | 1317 | if (data_len < txreq.size) { |
1307 | skb_shinfo(skb)->nr_frags++; | 1318 | skb_shinfo(skb)->nr_frags++; |
1308 | frag_set_pending_idx(&skb_shinfo(skb)->frags[0], | 1319 | frag_set_pending_idx(&skb_shinfo(skb)->frags[0], |
1309 | pending_idx); | 1320 | pending_idx); |
1321 | xenvif_tx_create_map_op(vif, pending_idx, &txreq, gop); | ||
1322 | gop++; | ||
1310 | } else { | 1323 | } else { |
1311 | frag_set_pending_idx(&skb_shinfo(skb)->frags[0], | 1324 | frag_set_pending_idx(&skb_shinfo(skb)->frags[0], |
1312 | INVALID_PENDING_IDX); | 1325 | INVALID_PENDING_IDX); |
1326 | memcpy(&vif->pending_tx_info[pending_idx].req, &txreq, | ||
1327 | sizeof(txreq)); | ||
1313 | } | 1328 | } |
1314 | 1329 | ||
1315 | vif->pending_cons++; | 1330 | vif->pending_cons++; |
@@ -1326,11 +1341,13 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget) | |||
1326 | 1341 | ||
1327 | vif->tx.req_cons = idx; | 1342 | vif->tx.req_cons = idx; |
1328 | 1343 | ||
1329 | if ((gop-vif->tx_map_ops) >= ARRAY_SIZE(vif->tx_map_ops)) | 1344 | if (((gop-vif->tx_map_ops) >= ARRAY_SIZE(vif->tx_map_ops)) || |
1345 | (*copy_ops >= ARRAY_SIZE(vif->tx_copy_ops))) | ||
1330 | break; | 1346 | break; |
1331 | } | 1347 | } |
1332 | 1348 | ||
1333 | return gop - vif->tx_map_ops; | 1349 | (*map_ops) = gop - vif->tx_map_ops; |
1350 | return; | ||
1334 | } | 1351 | } |
1335 | 1352 | ||
1336 | /* Consolidate skb with a frag_list into a brand new one with local pages on | 1353 | /* Consolidate skb with a frag_list into a brand new one with local pages on |
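The header is now fetched with a hypervisor grant copy straight into skb->data instead of being grant-mapped and memcpy'd later: the copy operation names the frontend's grant reference as its source and a backend-local frame as its destination. A condensed sketch of how such a gnttab_copy is filled, mirroring the fields set in the hunk above (including the patch's use of virt_to_mfn()); the helper fill_header_copy() is illustrative only, not part of the driver:

#include <linux/mm.h>
#include <xen/interface/grant_table.h>
#include <asm/xen/page.h>

/* Sketch: "copy 'len' bytes from the guest's granted page into local
 * memory at 'dst'", to be queued and issued later as a batch.
 */
static void fill_header_copy(struct gnttab_copy *op, grant_ref_t gref,
			     domid_t otherend, uint16_t grant_off,
			     void *dst, uint16_t len)
{
	op->source.u.ref = gref;		/* frontend's grant reference */
	op->source.domid = otherend;		/* frontend domain id */
	op->source.offset = grant_off;		/* offset inside the granted page */

	op->dest.u.gmfn = virt_to_mfn(dst);	/* backend's own frame */
	op->dest.domid = DOMID_SELF;
	op->dest.offset = offset_in_page(dst);

	op->len = len;
	op->flags = GNTCOPY_source_gref;	/* only the source is a grant ref */
}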
@@ -1402,6 +1419,7 @@ static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb) | |||
1402 | static int xenvif_tx_submit(struct xenvif *vif) | 1419 | static int xenvif_tx_submit(struct xenvif *vif) |
1403 | { | 1420 | { |
1404 | struct gnttab_map_grant_ref *gop_map = vif->tx_map_ops; | 1421 | struct gnttab_map_grant_ref *gop_map = vif->tx_map_ops; |
1422 | struct gnttab_copy *gop_copy = vif->tx_copy_ops; | ||
1405 | struct sk_buff *skb; | 1423 | struct sk_buff *skb; |
1406 | int work_done = 0; | 1424 | int work_done = 0; |
1407 | 1425 | ||
@@ -1414,27 +1432,22 @@ static int xenvif_tx_submit(struct xenvif *vif) | |||
1414 | txp = &vif->pending_tx_info[pending_idx].req; | 1432 | txp = &vif->pending_tx_info[pending_idx].req; |
1415 | 1433 | ||
1416 | /* Check the remap error code. */ | 1434 | /* Check the remap error code. */ |
1417 | if (unlikely(xenvif_tx_check_gop(vif, skb, &gop_map))) { | 1435 | if (unlikely(xenvif_tx_check_gop(vif, skb, &gop_map, &gop_copy))) { |
1418 | netdev_dbg(vif->dev, "netback grant failed.\n"); | ||
1419 | skb_shinfo(skb)->nr_frags = 0; | 1436 | skb_shinfo(skb)->nr_frags = 0; |
1420 | kfree_skb(skb); | 1437 | kfree_skb(skb); |
1421 | continue; | 1438 | continue; |
1422 | } | 1439 | } |
1423 | 1440 | ||
1424 | data_len = skb->len; | 1441 | data_len = skb->len; |
1425 | memcpy(skb->data, | ||
1426 | (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset), | ||
1427 | data_len); | ||
1428 | callback_param(vif, pending_idx).ctx = NULL; | 1442 | callback_param(vif, pending_idx).ctx = NULL; |
1429 | if (data_len < txp->size) { | 1443 | if (data_len < txp->size) { |
1430 | /* Append the packet payload as a fragment. */ | 1444 | /* Append the packet payload as a fragment. */ |
1431 | txp->offset += data_len; | 1445 | txp->offset += data_len; |
1432 | txp->size -= data_len; | 1446 | txp->size -= data_len; |
1433 | skb_shinfo(skb)->destructor_arg = | ||
1434 | &callback_param(vif, pending_idx); | ||
1435 | } else { | 1447 | } else { |
1436 | /* Schedule a response immediately. */ | 1448 | /* Schedule a response immediately. */ |
1437 | xenvif_idx_unmap(vif, pending_idx); | 1449 | xenvif_idx_release(vif, pending_idx, |
1450 | XEN_NETIF_RSP_OKAY); | ||
1438 | } | 1451 | } |
1439 | 1452 | ||
1440 | if (txp->flags & XEN_NETTXF_csum_blank) | 1453 | if (txp->flags & XEN_NETTXF_csum_blank) |
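In xenvif_tx_submit() the memcpy from the mapped header page disappears, since the grant copy already placed the header bytes in skb->data, and a request that fits entirely in the copied header is acknowledged immediately with XEN_NETIF_RSP_OKAY instead of going through unmap. A sketch of the resulting per-packet path, using the driver fields shown in the hunk above, with error handling elided:

	data_len = skb->len;			/* header bytes already copied in */
	callback_param(vif, pending_idx).ctx = NULL;

	if (data_len < txp->size) {
		/* the rest of the request is carried by a grant-mapped frag */
		txp->offset += data_len;
		txp->size -= data_len;
	} else {
		/* request fully consumed by the copied header: respond now */
		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
	}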
@@ -1613,22 +1626,25 @@ static inline void xenvif_tx_dealloc_action(struct xenvif *vif) | |||
1613 | /* Called after netfront has transmitted */ | 1626 | /* Called after netfront has transmitted */ |
1614 | int xenvif_tx_action(struct xenvif *vif, int budget) | 1627 | int xenvif_tx_action(struct xenvif *vif, int budget) |
1615 | { | 1628 | { |
1616 | unsigned nr_mops; | 1629 | unsigned nr_mops, nr_cops = 0; |
1617 | int work_done, ret; | 1630 | int work_done, ret; |
1618 | 1631 | ||
1619 | if (unlikely(!tx_work_todo(vif))) | 1632 | if (unlikely(!tx_work_todo(vif))) |
1620 | return 0; | 1633 | return 0; |
1621 | 1634 | ||
1622 | nr_mops = xenvif_tx_build_gops(vif, budget); | 1635 | xenvif_tx_build_gops(vif, budget, &nr_cops, &nr_mops); |
1623 | 1636 | ||
1624 | if (nr_mops == 0) | 1637 | if (nr_cops == 0) |
1625 | return 0; | 1638 | return 0; |
1626 | 1639 | ||
1627 | ret = gnttab_map_refs(vif->tx_map_ops, | 1640 | gnttab_batch_copy(vif->tx_copy_ops, nr_cops); |
1628 | NULL, | 1641 | if (nr_mops != 0) { |
1629 | vif->pages_to_map, | 1642 | ret = gnttab_map_refs(vif->tx_map_ops, |
1630 | nr_mops); | 1643 | NULL, |
1631 | BUG_ON(ret); | 1644 | vif->pages_to_map, |
1645 | nr_mops); | ||
1646 | BUG_ON(ret); | ||
1647 | } | ||
1632 | 1648 | ||
1633 | work_done = xenvif_tx_submit(vif); | 1649 | work_done = xenvif_tx_submit(vif); |
1634 | 1650 | ||
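xenvif_tx_action() now builds both batches in one pass and issues them in order: the copy batch (headers) runs whenever any work was built, the map batch (fragments) only when map operations were actually produced. A condensed sketch of that dispatch, a fragment of the function body using the grant-table batch APIs and the driver arrays shown in the hunk above:

	if (nr_cops == 0)
		return 0;			/* nothing was built this round */

	gnttab_batch_copy(vif->tx_copy_ops, nr_cops);	/* copy all packet headers */

	if (nr_mops != 0) {
		ret = gnttab_map_refs(vif->tx_map_ops, NULL,
				      vif->pages_to_map, nr_mops);
		BUG_ON(ret);			/* a failed batch map is fatal */
	}

	work_done = xenvif_tx_submit(vif);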