Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--	drivers/net/xen-netback/netback.c | 138
1 file changed, 52 insertions(+), 86 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index fd00f25d9850..0cb594c86090 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -60,6 +60,9 @@ struct netbk_rx_meta {
 
 #define MAX_PENDING_REQS 256
 
+/* Discriminate from any valid pending_idx value. */
+#define INVALID_PENDING_IDX 0xFFFF
+
 #define MAX_BUFFER_OFFSET PAGE_SIZE
 
 /* extra field used in struct page */
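For context: MAX_PENDING_REQS is 256, so every live pending_idx sits well below the new 0xFFFF sentinel and the two can never collide. A minimal sketch of the discrimination this enables (helper name is hypothetical, not part of the patch):

/* A pending_idx indexes a 256-slot ring, so the all-ones u16 sentinel
 * is guaranteed never to alias a valid slot. */
static inline bool pending_idx_is_valid(u16 pending_idx)
{
	return pending_idx != INVALID_PENDING_IDX;
}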
@@ -155,13 +158,13 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
 				      u16      flags);
 
 static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
-				       unsigned int idx)
+				       u16 idx)
 {
 	return page_to_pfn(netbk->mmap_pages[idx]);
 }
 
 static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
-					 unsigned int idx)
+					 u16 idx)
 {
 	return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
 }
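For context: with pending_idx now a u16 end to end, these helpers chain mmap_pages[idx] -> pfn -> kernel virtual address for a pending slot's backing page. A hypothetical demo of that chain:

static void *pending_buf_demo(struct xen_netbk *netbk, u16 pending_idx)
{
	/* mmap_pages[pending_idx] backs the slot; idx_to_kaddr() goes
	 * page -> pfn -> lowmem virtual address in one step. */
	return (void *)idx_to_kaddr(netbk, pending_idx);
}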
@@ -215,6 +218,16 @@ static int get_page_ext(struct page *pg,
 			sizeof(struct iphdr) + MAX_IPOPTLEN + \
 			sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
 
+static u16 frag_get_pending_idx(skb_frag_t *frag)
+{
+	return (u16)frag->page_offset;
+}
+
+static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
+{
+	frag->page_offset = pending_idx;
+}
+
 static inline pending_ring_idx_t pending_index(unsigned i)
 {
 	return i & (MAX_PENDING_REQS-1);
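For context: until its grant copy completes, a TX frag has no real page to point at, so the backend parks the 16-bit pending ring index in the frag via the two helpers above and recovers it later in xen_netbk_fill_frags(). A minimal round-trip sketch (demo function is hypothetical):

/* Park a pending ring index in a not-yet-filled frag, then read it
 * back; the real page/offset/size are only installed afterwards by
 * __skb_fill_page_desc() in xen_netbk_fill_frags(). */
static void frag_pending_idx_demo(skb_frag_t *frag)
{
	u16 idx = 42;	/* arbitrary slot below MAX_PENDING_REQS */

	frag_set_pending_idx(frag, idx);
	BUG_ON(frag_get_pending_idx(frag) != idx);
}

Note that pending_index() relies on MAX_PENDING_REQS being a power of two: the AND with MAX_PENDING_REQS-1 is a cheap modulo for wrapping ring counters.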
@@ -321,7 +334,7 @@ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
 		count++;
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		unsigned long size = skb_shinfo(skb)->frags[i].size;
+		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
 		unsigned long bytes;
 		while (size > 0) {
 			BUG_ON(copy_off > MAX_BUFFER_OFFSET);
@@ -512,8 +525,8 @@ static int netbk_gop_skb(struct sk_buff *skb,
 
 	for (i = 0; i < nr_frags; i++) {
 		netbk_gop_frag_copy(vif, skb, npo,
-				    skb_shinfo(skb)->frags[i].page,
-				    skb_shinfo(skb)->frags[i].size,
+				    skb_frag_page(&skb_shinfo(skb)->frags[i]),
+				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
 				    skb_shinfo(skb)->frags[i].page_offset,
 				    &head);
 	}
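For context: these two hunks switch from poking skb_frag_t fields to the accessor API (skb_frag_page(), skb_frag_size()), insulating the driver from layout changes in the frag structure. A sketch of the access pattern (demo function and debug output are illustrative only):

static void frag_walk_demo(struct sk_buff *skb)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct page *page = skb_frag_page(&skb_shinfo(skb)->frags[i]);
		unsigned int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		pr_debug("frag %d: pfn %lx, %u bytes\n",
			 i, page_to_pfn(page), size);
	}
}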
@@ -890,7 +903,7 @@ static int netbk_count_requests(struct xenvif *vif,
 
 static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
 					 struct sk_buff *skb,
-					 unsigned long pending_idx)
+					 u16 pending_idx)
 {
 	struct page *page;
 	page = alloc_page(GFP_KERNEL|__GFP_COLD);
@@ -909,11 +922,11 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	skb_frag_t *frags = shinfo->frags;
-	unsigned long pending_idx = *((u16 *)skb->data);
+	u16 pending_idx = *((u16 *)skb->data);
 	int i, start;
 
 	/* Skip first skb fragment if it is on same page as header fragment. */
-	start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
 
 	for (i = start; i < shinfo->nr_frags; i++, txp++) {
 		struct page *page;
@@ -945,7 +958,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 		memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
 		xenvif_get(vif);
 		pending_tx_info[pending_idx].vif = vif;
-		frags[i].page = (void *)pending_idx;
+		frag_set_pending_idx(&frags[i], pending_idx);
 	}
 
 	return gop;
@@ -956,7 +969,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 				  struct gnttab_copy **gopp)
 {
 	struct gnttab_copy *gop = *gopp;
-	int pending_idx = *((u16 *)skb->data);
+	u16 pending_idx = *((u16 *)skb->data);
 	struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
 	struct xenvif *vif = pending_tx_info[pending_idx].vif;
 	struct xen_netif_tx_request *txp;
@@ -976,13 +989,13 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 	}
 
 	/* Skip first skb fragment if it is on same page as header fragment. */
-	start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
 
 	for (i = start; i < nr_frags; i++) {
 		int j, newerr;
 		pending_ring_idx_t index;
 
-		pending_idx = (unsigned long)shinfo->frags[i].page;
+		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
 
 		/* Check error status: if okay then remember grant handle. */
 		newerr = (++gop)->status;
@@ -1008,7 +1021,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 		pending_idx = *((u16 *)skb->data);
 		xen_netbk_idx_release(netbk, pending_idx);
 		for (j = start; j < i; j++) {
-			pending_idx = (unsigned long)shinfo->frags[i].page;
+			pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
 			xen_netbk_idx_release(netbk, pending_idx);
 		}
 
@@ -1029,15 +1042,14 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
 	for (i = 0; i < nr_frags; i++) {
 		skb_frag_t *frag = shinfo->frags + i;
 		struct xen_netif_tx_request *txp;
-		unsigned long pending_idx;
+		struct page *page;
+		u16 pending_idx;
 
-		pending_idx = (unsigned long)frag->page;
+		pending_idx = frag_get_pending_idx(frag);
 
 		txp = &netbk->pending_tx_info[pending_idx].req;
-		frag->page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
-		frag->size = txp->size;
-		frag->page_offset = txp->offset;
-
+		page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
+		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
 		skb->len += txp->size;
 		skb->data_len += txp->size;
 		skb->truesize += txp->size;
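For context: __skb_fill_page_desc() only fills the frag descriptor (page, offset, size); it does not touch the skb's byte accounting, which is why the hunk above still bumps len, data_len and truesize by hand. A sketch of that division of labour (demo function is hypothetical):

static void attach_frag_demo(struct sk_buff *skb, int i,
			     struct page *page, int off, int size)
{
	/* Descriptor only: page/offset/size for frag i. */
	__skb_fill_page_desc(skb, i, page, off, size);

	/* Byte accounting remains the caller's responsibility. */
	skb->len      += size;
	skb->data_len += size;
	skb->truesize += size;
}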
@@ -1349,11 +1361,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 	skb_shinfo(skb)->nr_frags = ret;
 	if (data_len < txreq.size) {
 		skb_shinfo(skb)->nr_frags++;
-		skb_shinfo(skb)->frags[0].page =
-			(void *)(unsigned long)pending_idx;
+		frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
+				     pending_idx);
 	} else {
-		/* Discriminate from any valid pending_idx value. */
-		skb_shinfo(skb)->frags[0].page = (void *)~0UL;
+		frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
+				     INVALID_PENDING_IDX);
 	}
 
 	__skb_queue_tail(&netbk->tx_queue, skb);
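For context: frag 0 now carries either the pending_idx of a continuation slot (when the packet needs more than the headlen copy) or INVALID_PENDING_IDX, so later code can tell the cases apart without the old (void *)~0UL cast. A sketch of the consumer-side test, mirroring the "start = ..." lines in the hunks above (demo function is hypothetical):

/* Returns nonzero when frag 0 shares the header's pending slot (the
 * header's pending_idx is stashed at skb->data by the build path). */
static int first_frag_is_header_demo(struct sk_buff *skb)
{
	u16 hdr_idx = *((u16 *)skb->data);

	return frag_get_pending_idx(&skb_shinfo(skb)->frags[0]) == hdr_idx;
}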
@@ -1577,88 +1589,42 @@ static int xen_netbk_kthread(void *data)
 
 void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
 {
-	struct gnttab_unmap_grant_ref op;
-
-	if (vif->tx.sring) {
-		gnttab_set_unmap_op(&op, (unsigned long)vif->tx_comms_area->addr,
-				    GNTMAP_host_map, vif->tx_shmem_handle);
-
-		if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
-			BUG();
-	}
-
-	if (vif->rx.sring) {
-		gnttab_set_unmap_op(&op, (unsigned long)vif->rx_comms_area->addr,
-				    GNTMAP_host_map, vif->rx_shmem_handle);
-
-		if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
-			BUG();
-	}
-	if (vif->rx_comms_area)
-		free_vm_area(vif->rx_comms_area);
-	if (vif->tx_comms_area)
-		free_vm_area(vif->tx_comms_area);
+	if (vif->tx.sring)
+		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
+					vif->tx.sring);
+	if (vif->rx.sring)
+		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
+					vif->rx.sring);
 }
 
 int xen_netbk_map_frontend_rings(struct xenvif *vif,
 				 grant_ref_t tx_ring_ref,
 				 grant_ref_t rx_ring_ref)
 {
-	struct gnttab_map_grant_ref op;
+	void *addr;
 	struct xen_netif_tx_sring *txs;
 	struct xen_netif_rx_sring *rxs;
 
 	int err = -ENOMEM;
 
-	vif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
-	if (vif->tx_comms_area == NULL)
+	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
+				     tx_ring_ref, &addr);
+	if (err)
 		goto err;
 
-	vif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
-	if (vif->rx_comms_area == NULL)
-		goto err;
-
-	gnttab_set_map_op(&op, (unsigned long)vif->tx_comms_area->addr,
-			  GNTMAP_host_map, tx_ring_ref, vif->domid);
-
-	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-		BUG();
-
-	if (op.status) {
-		netdev_warn(vif->dev,
-			    "failed to map tx ring. err=%d status=%d\n",
-			    err, op.status);
-		err = op.status;
-		goto err;
-	}
-
-	vif->tx_shmem_ref = tx_ring_ref;
-	vif->tx_shmem_handle = op.handle;
-
-	txs = (struct xen_netif_tx_sring *)vif->tx_comms_area->addr;
+	txs = (struct xen_netif_tx_sring *)addr;
 	BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
 
-	gnttab_set_map_op(&op, (unsigned long)vif->rx_comms_area->addr,
-			  GNTMAP_host_map, rx_ring_ref, vif->domid);
-
-	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-		BUG();
-
-	if (op.status) {
-		netdev_warn(vif->dev,
-			    "failed to map rx ring. err=%d status=%d\n",
-			    err, op.status);
-		err = op.status;
+	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
+				     rx_ring_ref, &addr);
+	if (err)
 		goto err;
-	}
 
-	vif->rx_shmem_ref = rx_ring_ref;
-	vif->rx_shmem_handle = op.handle;
-	vif->rx_req_cons_peek = 0;
-
-	rxs = (struct xen_netif_rx_sring *)vif->rx_comms_area->addr;
+	rxs = (struct xen_netif_rx_sring *)addr;
 	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
 
+	vif->rx_req_cons_peek = 0;
+
 	return 0;
 
 err:
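For context: this hunk replaces open-coded GNTTABOP_map_grant_ref/GNTTABOP_unmap_grant_ref hypercalls, alloc_vm_area()/free_vm_area() and the shmem ref/handle bookkeeping with the paired xenbus helpers: xenbus_map_ring_valloc() allocates the virtual area and maps the granted page into it, and xenbus_unmap_ring_vfree() undoes both in one call. A sketch of the pairing for one ring (demo function is hypothetical):

static int map_one_ring_demo(struct xenvif *vif, grant_ref_t ref,
			     void **addr)
{
	/* Maps the frontend's granted page into a fresh vmalloc area;
	 * on success *addr points at the shared ring. */
	int err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
					 ref, addr);
	if (err)
		return err;

	/* ... BACK_RING_INIT() on *addr; teardown is later a single
	 * xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif), *addr). */
	return 0;
}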