Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r--  drivers/net/xen-netfront.c | 38 +++++++++++++++++++++-----------------
 1 file changed, 21 insertions(+), 17 deletions(-)
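
This commit converts xen-netfront from poking skb_frag_t fields directly (frag->page, frag->page_offset, frag->size) to the SKB paged-frag accessor API: skb_frag_page(), skb_frag_size(), skb_frag_size_set() and __skb_fill_page_desc(). As rough orientation only, the hypothetical helper below (not part of this commit; the function name, parameters and the WARN_ON are illustrative) shows the old idiom next to the accessor-based one used throughout the hunks that follow:

#include <linux/skbuff.h>

/* Hypothetical example, not from this commit: fill and read back the
 * first paged fragment of an skb through the accessor API. */
static unsigned int example_convert_frag0(struct sk_buff *skb,
					  struct page *page,
					  unsigned int off, unsigned int len)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];

	/* Old style, removed by this diff:
	 *	frag->page        = page;
	 *	frag->page_offset = off;
	 *	frag->size        = len;
	 */

	/* New style: fill the whole descriptor in one call... */
	__skb_fill_page_desc(skb, 0, page, off, len);
	skb_shinfo(skb)->nr_frags = 1;	/* the helper does not bump nr_frags */

	/* ...and read it back through the accessors instead of the fields. */
	WARN_ON(skb_frag_page(frag) != page);
	return skb_frag_size(frag);
}
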
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index d7c8a98daff6..226faab23603 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -275,7 +275,7 @@ no_skb:
 			break;
 		}
 
-		skb_shinfo(skb)->frags[0].page = page;
+		__skb_fill_page_desc(skb, 0, page, 0, 0);
 		skb_shinfo(skb)->nr_frags = 1;
 		__skb_queue_tail(&np->rx_batch, skb);
 	}
@@ -309,8 +309,8 @@ no_skb:
 		BUG_ON((signed short)ref < 0);
 		np->grant_rx_ref[id] = ref;
 
-		pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
-		vaddr = page_address(skb_shinfo(skb)->frags[0].page);
+		pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
+		vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));
 
 		req = RING_GET_REQUEST(&np->rx, req_prod + i);
 		gnttab_grant_foreign_access_ref(ref,
@@ -461,13 +461,13 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
 		BUG_ON((signed short)ref < 0);
 
-		mfn = pfn_to_mfn(page_to_pfn(frag->page));
+		mfn = pfn_to_mfn(page_to_pfn(skb_frag_page(frag)));
 		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
 						mfn, GNTMAP_readonly);
 
 		tx->gref = np->grant_tx_ref[id] = ref;
 		tx->offset = frag->page_offset;
-		tx->size = frag->size;
+		tx->size = skb_frag_size(frag);
 		tx->flags = 0;
 	}
 
@@ -762,23 +762,22 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	int nr_frags = shinfo->nr_frags;
 	RING_IDX cons = np->rx.rsp_cons;
-	skb_frag_t *frag = shinfo->frags + nr_frags;
 	struct sk_buff *nskb;
 
 	while ((nskb = __skb_dequeue(list))) {
 		struct xen_netif_rx_response *rx =
 			RING_GET_RESPONSE(&np->rx, ++cons);
+		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 
-		frag->page = skb_shinfo(nskb)->frags[0].page;
-		frag->page_offset = rx->offset;
-		frag->size = rx->status;
+		__skb_fill_page_desc(skb, nr_frags,
+				     skb_frag_page(nfrag),
+				     rx->offset, rx->status);
 
 		skb->data_len += rx->status;
 
 		skb_shinfo(nskb)->nr_frags = 0;
 		kfree_skb(nskb);
 
-		frag++;
 		nr_frags++;
 	}
 
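
One note on the xennet_fill_frags() hunk above: the hand-maintained frag cursor is gone, and each dequeued skb's page is installed into slot nr_frags of the target skb with __skb_fill_page_desc(), which fills the descriptor but leaves shinfo->nr_frags untouched, so the loop still counts frags itself. A minimal sketch of that caller-side pattern follows (hypothetical helper name, and no length/truesize accounting beyond what the hunk shows):

#include <linux/skbuff.h>

/* Hypothetical sketch, not from this commit: append one page as a new
 * paged fragment, mirroring the loop body in xennet_fill_frags(). */
static void example_append_frag(struct sk_buff *skb, struct page *page,
				unsigned int off, unsigned int len)
{
	int i = skb_shinfo(skb)->nr_frags;

	/* Fill descriptor slot i (page, offset, size)... */
	__skb_fill_page_desc(skb, i, page, off, len);
	skb->data_len += len;

	/* ...then account for the new frag by hand, since the helper
	 * does not touch nr_frags. */
	skb_shinfo(skb)->nr_frags = i + 1;
}
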
@@ -873,7 +872,7 @@ static int handle_incoming_queue(struct net_device *dev,
 		memcpy(skb->data, vaddr + offset,
 		       skb_headlen(skb));
 
-		if (page != skb_shinfo(skb)->frags[0].page)
+		if (page != skb_frag_page(&skb_shinfo(skb)->frags[0]))
 			__free_page(page);
 
 		/* Ethernet work: Delayed to here as it peeks the header. */
@@ -954,7 +953,8 @@ err:
 			}
 		}
 
-		NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page;
+		NETFRONT_SKB_CB(skb)->page =
+			skb_frag_page(&skb_shinfo(skb)->frags[0]);
 		NETFRONT_SKB_CB(skb)->offset = rx->offset;
 
 		len = rx->status;
@@ -965,10 +965,10 @@ err:
 		if (rx->status > len) {
 			skb_shinfo(skb)->frags[0].page_offset =
 				rx->offset + len;
-			skb_shinfo(skb)->frags[0].size = rx->status - len;
+			skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status - len);
 			skb->data_len = rx->status - len;
 		} else {
-			skb_shinfo(skb)->frags[0].page = NULL;
+			__skb_fill_page_desc(skb, 0, NULL, 0, 0);
 			skb_shinfo(skb)->nr_frags = 0;
 		}
 
@@ -1143,7 +1143,8 @@ static void xennet_release_rx_bufs(struct netfront_info *np)
 
 		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
 			/* Remap the page. */
-			struct page *page = skb_shinfo(skb)->frags[0].page;
+			const struct page *page =
+				skb_frag_page(&skb_shinfo(skb)->frags[0]);
 			unsigned long pfn = page_to_pfn(page);
 			void *vaddr = page_address(page);
 
@@ -1650,6 +1651,8 @@ static int xennet_connect(struct net_device *dev)
 
 	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
 	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
+		skb_frag_t *frag;
+		const struct page *page;
 		if (!np->rx_skbs[i])
 			continue;
 
@@ -1657,10 +1660,11 @@ static int xennet_connect(struct net_device *dev)
 		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
 		req = RING_GET_REQUEST(&np->rx, requeue_idx);
 
+		frag = &skb_shinfo(skb)->frags[0];
+		page = skb_frag_page(frag);
 		gnttab_grant_foreign_access_ref(
 			ref, np->xbdev->otherend_id,
-			pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->
-						frags->page)),
+			pfn_to_mfn(page_to_pfn(page)),
 			0);
 		req->gref = ref;
 		req->id = requeue_idx;