Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--  drivers/net/xen-netback/netback.c | 54 +++++++++++++++++++++++++++++---------------------
 1 file changed, 33 insertions(+), 21 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index fd00f25d9850..8d70b44fcd8a 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -60,6 +60,9 @@ struct netbk_rx_meta {
 
 #define MAX_PENDING_REQS 256
 
+/* Discriminate from any valid pending_idx value. */
+#define INVALID_PENDING_IDX 0xFFFF
+
 #define MAX_BUFFER_OFFSET PAGE_SIZE
 
 /* extra field used in struct page */
@@ -155,13 +158,13 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
 			      u16 flags);
 
 static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
-				       unsigned int idx)
+				       u16 idx)
 {
 	return page_to_pfn(netbk->mmap_pages[idx]);
 }
 
 static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
-					 unsigned int idx)
+					 u16 idx)
 {
 	return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
 }
@@ -215,6 +218,16 @@ static int get_page_ext(struct page *pg,
 		 sizeof(struct iphdr) + MAX_IPOPTLEN + \
 		 sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
 
+static u16 frag_get_pending_idx(skb_frag_t *frag)
+{
+	return (u16)frag->page_offset;
+}
+
+static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
+{
+	frag->page_offset = pending_idx;
+}
+
 static inline pending_ring_idx_t pending_index(unsigned i)
 {
 	return i & (MAX_PENDING_REQS-1);
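
The pair of helpers added above is the core of the patch: while a transmit request is in flight, its 16-bit pending_idx is stashed in the fragment's page_offset field instead of being type-punned into the page pointer, and 0xFFFF (INVALID_PENDING_IDX) serves as a sentinel no valid index can take, since valid indices are below MAX_PENDING_REQS (256). A minimal user-space sketch of the scheme, using a hypothetical stand-in struct rather than the kernel's skb_frag_t:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PENDING_REQS    256
#define INVALID_PENDING_IDX 0xFFFF	/* cannot collide: valid idx < 256 */

/* Hypothetical stand-in for skb_frag_t. */
struct frag {
	uint32_t page_offset;	/* reused as pending_idx storage in flight */
};

static uint16_t frag_get_pending_idx(const struct frag *frag)
{
	return (uint16_t)frag->page_offset;
}

static void frag_set_pending_idx(struct frag *frag, uint16_t pending_idx)
{
	frag->page_offset = pending_idx;
}

int main(void)
{
	struct frag f;

	frag_set_pending_idx(&f, 42);
	assert(frag_get_pending_idx(&f) == 42);

	frag_set_pending_idx(&f, INVALID_PENDING_IDX);
	assert(frag_get_pending_idx(&f) == INVALID_PENDING_IDX);

	/* pending_index() in the driver masks a free-running counter into
	 * the ring: MAX_PENDING_REQS is a power of two, so
	 * i & (256 - 1) is i % 256 without a division. */
	printf("pending_index(257) = %u\n", 257u & (MAX_PENDING_REQS - 1));
	return 0;
}

The u16 narrowing of idx_to_pfn()/idx_to_kaddr() above is safe for the same reason: every index fits comfortably in 16 bits.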
@@ -512,7 +525,7 @@ static int netbk_gop_skb(struct sk_buff *skb,
 
 	for (i = 0; i < nr_frags; i++) {
 		netbk_gop_frag_copy(vif, skb, npo,
-				    skb_shinfo(skb)->frags[i].page,
+				    skb_frag_page(&skb_shinfo(skb)->frags[i]),
 				    skb_shinfo(skb)->frags[i].size,
 				    skb_shinfo(skb)->frags[i].page_offset,
 				    &head);
@@ -890,7 +903,7 @@ static int netbk_count_requests(struct xenvif *vif,
 
 static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
 					 struct sk_buff *skb,
-					 unsigned long pending_idx)
+					 u16 pending_idx)
 {
 	struct page *page;
 	page = alloc_page(GFP_KERNEL|__GFP_COLD);
@@ -909,11 +922,11 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	skb_frag_t *frags = shinfo->frags;
-	unsigned long pending_idx = *((u16 *)skb->data);
+	u16 pending_idx = *((u16 *)skb->data);
 	int i, start;
 
 	/* Skip first skb fragment if it is on same page as header fragment. */
-	start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
 
 	for (i = start; i < shinfo->nr_frags; i++, txp++) {
 		struct page *page;
@@ -945,7 +958,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 		memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
 		xenvif_get(vif);
 		pending_tx_info[pending_idx].vif = vif;
-		frags[i].page = (void *)pending_idx;
+		frag_set_pending_idx(&frags[i], pending_idx);
 	}
 
 	return gop;
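
The "skip first fragment" test now compares two u16 values instead of comparing a pointer cast to unsigned long against an index. A small sketch of the bookkeeping, with hypothetical plain types in place of the kernel structures:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* The driver stores the header's pending_idx in the skb data area and
 * marks frag 0 with the same index when the first fragment shares the
 * header's page; that fragment is then skipped when building grant ops. */
int main(void)
{
	uint8_t skb_data[64];
	uint16_t hdr_pending_idx = 7;
	uint16_t frag0_pending_idx = 7;	/* same page as the header */
	uint16_t pending_idx;
	int start;

	memcpy(skb_data, &hdr_pending_idx, sizeof(hdr_pending_idx));
	/* equivalent of *((u16 *)skb->data) without strict-aliasing issues */
	memcpy(&pending_idx, skb_data, sizeof(pending_idx));

	start = (frag0_pending_idx == pending_idx);
	printf("start = %d (frag 0 %s)\n", start,
	       start ? "skipped" : "processed");
	return 0;
}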
@@ -956,7 +969,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 				  struct gnttab_copy **gopp)
 {
 	struct gnttab_copy *gop = *gopp;
-	int pending_idx = *((u16 *)skb->data);
+	u16 pending_idx = *((u16 *)skb->data);
 	struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
 	struct xenvif *vif = pending_tx_info[pending_idx].vif;
 	struct xen_netif_tx_request *txp;
@@ -976,13 +989,13 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 	}
 
 	/* Skip first skb fragment if it is on same page as header fragment. */
-	start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
 
 	for (i = start; i < nr_frags; i++) {
 		int j, newerr;
 		pending_ring_idx_t index;
 
-		pending_idx = (unsigned long)shinfo->frags[i].page;
+		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
 
 		/* Check error status: if okay then remember grant handle. */
 		newerr = (++gop)->status;
@@ -1008,7 +1021,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 		pending_idx = *((u16 *)skb->data);
 		xen_netbk_idx_release(netbk, pending_idx);
 		for (j = start; j < i; j++) {
-			pending_idx = (unsigned long)shinfo->frags[i].page;
+			pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
 			xen_netbk_idx_release(netbk, pending_idx);
 		}
 
@@ -1029,15 +1042,14 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
 	for (i = 0; i < nr_frags; i++) {
 		skb_frag_t *frag = shinfo->frags + i;
 		struct xen_netif_tx_request *txp;
-		unsigned long pending_idx;
+		struct page *page;
+		u16 pending_idx;
 
-		pending_idx = (unsigned long)frag->page;
+		pending_idx = frag_get_pending_idx(frag);
 
 		txp = &netbk->pending_tx_info[pending_idx].req;
-		frag->page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
-		frag->size = txp->size;
-		frag->page_offset = txp->offset;
-
+		page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
+		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
 		skb->len += txp->size;
 		skb->data_len += txp->size;
 		skb->truesize += txp->size;
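
Here __skb_fill_page_desc(skb, i, page, off, size) bundles the three field assignments the old code did by hand (frag->page, frag->page_offset, frag->size), going through the accessor layer instead of touching skb_frag_t internals. A sketch of what such a helper boils down to, with hypothetical mini-structs rather than the kernel's:

#include <assert.h>

struct page;			/* opaque, as in the kernel */

/* Hypothetical mini skb_frag_t with the three fields the patch stops
 * poking directly. */
struct frag {
	struct page *page;
	unsigned int page_offset;
	unsigned int size;
};

/* One call sets the whole fragment descriptor. */
static void fill_page_desc(struct frag *frags, int i, struct page *page,
			   unsigned int off, unsigned int size)
{
	frags[i].page = page;
	frags[i].page_offset = off;
	frags[i].size = size;
}

int main(void)
{
	struct frag frags[2];
	struct page *p = (struct page *)0x1000;	/* placeholder pointer */

	fill_page_desc(frags, 0, p, 64, 1400);
	assert(frags[0].page == p);
	assert(frags[0].page_offset == 64);
	assert(frags[0].size == 1400);
	return 0;
}

Routing every access through such helpers is what lets later kernels change the layout of skb_frag_t without touching drivers.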
@@ -1349,11 +1361,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 	skb_shinfo(skb)->nr_frags = ret;
 	if (data_len < txreq.size) {
 		skb_shinfo(skb)->nr_frags++;
-		skb_shinfo(skb)->frags[0].page =
-			(void *)(unsigned long)pending_idx;
+		frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
+				     pending_idx);
 	} else {
-		/* Discriminate from any valid pending_idx value. */
-		skb_shinfo(skb)->frags[0].page = (void *)~0UL;
+		frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
+				     INVALID_PENDING_IDX);
 	}
 
 	__skb_queue_tail(&netbk->tx_queue, skb);
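
The final hunk is where the sentinel earns its keep: if the linear data area could not hold the whole first request, frag 0 inherits the request's pending_idx; otherwise it is set to INVALID_PENDING_IDX, replacing the old (void *)~0UL pointer trick, so the "same page as header" tests earlier in the file can never match it. A sketch of that decision, assuming plain integer types:

#include <stdint.h>
#include <stdio.h>

#define INVALID_PENDING_IDX 0xFFFF

static uint16_t first_frag_idx(unsigned int data_len, unsigned int req_size,
			       uint16_t pending_idx)
{
	/* The remainder of the first request spills into frag 0 only when
	 * the linear area was too small for all of it. */
	return (data_len < req_size) ? pending_idx : INVALID_PENDING_IDX;
}

int main(void)
{
	printf("spill:    %#x\n", (unsigned)first_frag_idx(128, 1500, 3));
	printf("no spill: %#x\n", (unsigned)first_frag_idx(1500, 1500, 3));
	return 0;	/* prints 0x3, then 0xffff */
}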