author     Wei Liu <wei.liu2@citrix.com>          2013-08-26 07:59:37 -0400
committer  David S. Miller <davem@davemloft.net>  2013-08-29 01:18:04 -0400
commit     43e9d1943278e96150b449ea1fa81f4ae27729d5 (patch)
tree       76c3752f6a2c4f6284b55b783ba78d686b36da14 /drivers/net/xen-netback/netback.c
parent     5b2941b18dc5f60a5c14a5c15693f9c58b0dd922 (diff)
xen-netback: remove page tracking facility
The data flow from DomU to DomU on the same host, in the current copying
scheme with the tracking facility:
       copy
DomU --------> Dom0          DomU
 |                            ^
 |____________________________|
             copy
The page in Dom0 is a page with a valid MFN, so we can always copy from
the page in Dom0, removing the need for the tracking facility:
       copy           copy
DomU --------> Dom0 -------> DomU
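On the receive side this leaves a single code path: a grant copy whose source is always a local Dom0 page addressed by MFN, never a foreign grant reference owned by the transmitting guest. A minimal sketch of that one remaining case follows; the field and flag names come from the Xen grant-table interface (xen/interface/grant_table.h), while the helper name and its parameters are illustrative stand-ins for the real netback state, not part of the patch:

#include <linux/mm.h>
#include <xen/interface/xen.h>
#include <xen/interface/grant_table.h>
#include <asm/xen/page.h>

/*
 * Sketch only: fill one RX grant-copy op now that the source is
 * always a local Dom0 page with a valid MFN.
 */
static void fill_rx_copy_op(struct gnttab_copy *copy_gop,
			    struct page *page, unsigned int offset,
			    unsigned int len, domid_t dest_domid,
			    grant_ref_t dest_gref)
{
	/* Only the destination is named by grant reference. */
	copy_gop->flags = GNTCOPY_dest_gref;

	/* Source: an ordinary Dom0 page, so DOMID_SELF + MFN suffices. */
	copy_gop->source.domid = DOMID_SELF;
	copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
	copy_gop->source.offset = offset;

	/* Destination: the receiving guest's granted RX buffer. */
	copy_gop->dest.domid = dest_domid;
	copy_gop->dest.u.ref = dest_gref;
	copy_gop->dest.offset = 0;

	copy_gop->len = len;
}

The removed code needed the tracking facility precisely to recover the transmitting guest's domid and grant reference for the foreign-page case; with the source fixed to DOMID_SELF, that lookup disappears.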
A simple iperf test shows no performance regression (as expected, since
we copy twice either way):

  W/ tracking:  ~5.3Gb/s
  W/o tracking: ~5.4Gb/s
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Matt Wilson <msw@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--  drivers/net/xen-netback/netback.c | 77
1 file changed, 2 insertions(+), 75 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 64828de25d9a..91f163d03a49 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -95,21 +95,6 @@ struct netbk_rx_meta {
 
 #define MAX_BUFFER_OFFSET	PAGE_SIZE
 
-/* extra field used in struct page */
-union page_ext {
-	struct {
-#if BITS_PER_LONG < 64
-#define IDX_WIDTH   8
-#define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH)
-		unsigned int group:GROUP_WIDTH;
-		unsigned int idx:IDX_WIDTH;
-#else
-		unsigned int group, idx;
-#endif
-	} e;
-	void *mapping;
-};
-
 struct xen_netbk {
 	wait_queue_head_t wq;
 	struct task_struct *task;
@@ -214,45 +199,6 @@ static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
 	return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
 }
 
-/* extra field used in struct page */
-static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk,
-				unsigned int idx)
-{
-	unsigned int group = netbk - xen_netbk;
-	union page_ext ext = { .e = { .group = group + 1, .idx = idx } };
-
-	BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
-	pg->mapping = ext.mapping;
-}
-
-static int get_page_ext(struct page *pg,
-			unsigned int *pgroup, unsigned int *pidx)
-{
-	union page_ext ext = { .mapping = pg->mapping };
-	struct xen_netbk *netbk;
-	unsigned int group, idx;
-
-	group = ext.e.group - 1;
-
-	if (group < 0 || group >= xen_netbk_group_nr)
-		return 0;
-
-	netbk = &xen_netbk[group];
-
-	idx = ext.e.idx;
-
-	if ((idx < 0) || (idx >= MAX_PENDING_REQS))
-		return 0;
-
-	if (netbk->mmap_pages[idx] != pg)
-		return 0;
-
-	*pgroup = group;
-	*pidx = idx;
-
-	return 1;
-}
-
 /*
  * This is the amount of packet we copy rather than map, so that the
  * guest can't fiddle with the contents of the headers while we do
@@ -453,12 +399,6 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 {
 	struct gnttab_copy *copy_gop;
 	struct netbk_rx_meta *meta;
-	/*
-	 * These variables are used iff get_page_ext returns true,
-	 * in which case they are guaranteed to be initialized.
-	 */
-	unsigned int uninitialized_var(group), uninitialized_var(idx);
-	int foreign = get_page_ext(page, &group, &idx);
 	unsigned long bytes;
 
 	/* Data must not cross a page boundary. */
@@ -494,20 +434,9 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 
 		copy_gop = npo->copy + npo->copy_prod++;
 		copy_gop->flags = GNTCOPY_dest_gref;
-		if (foreign) {
-			struct xen_netbk *netbk = &xen_netbk[group];
-			struct pending_tx_info *src_pend;
+		copy_gop->source.domid = DOMID_SELF;
+		copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
 
-			src_pend = &netbk->pending_tx_info[idx];
-
-			copy_gop->source.domid = src_pend->vif->domid;
-			copy_gop->source.u.ref = src_pend->req.gref;
-			copy_gop->flags |= GNTCOPY_source_gref;
-		} else {
-			void *vaddr = page_address(page);
-			copy_gop->source.domid = DOMID_SELF;
-			copy_gop->source.u.gmfn = virt_to_mfn(vaddr);
-		}
 		copy_gop->source.offset = offset;
 		copy_gop->dest.domid = vif->domid;
 
@@ -1047,7 +976,6 @@ static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
 	page = alloc_page(GFP_KERNEL|__GFP_COLD);
 	if (!page)
 		return NULL;
-	set_page_ext(page, netbk, pending_idx);
 	netbk->mmap_pages[pending_idx] = page;
 	return page;
 }
@@ -1155,7 +1083,6 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 		first->req.offset = 0;
 		first->req.size = dst_offset;
 		first->head = start_idx;
-		set_page_ext(page, netbk, head_idx);
 		netbk->mmap_pages[head_idx] = page;
 		frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
 	}
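For reference, the encoding the removed code relied on can be shown in isolation: union page_ext overlays a (group, idx) pair onto the pointer-sized page->mapping field, with the group biased by +1 so that an untracked page (mapping == NULL) fails the decode-time bounds check. Below is a minimal userspace sketch of that round trip, using the 64-bit layout from the removed code; everything outside union page_ext is illustrative scaffolding, not kernel code:

#include <stdio.h>

/* The 64-bit branch of the removed union: two ints fill one pointer. */
union page_ext {
	struct {
		unsigned int group, idx;
	} e;
	void *mapping;
};

int main(void)
{
	void *mapping;			/* stands in for page->mapping */
	union page_ext enc, dec;

	/* set_page_ext(): pack (group 3, idx 42), biasing group by +1. */
	enc.e.group = 3 + 1;
	enc.e.idx = 42;
	mapping = enc.mapping;

	/* get_page_ext(): unpack and undo the bias. */
	dec.mapping = mapping;
	printf("group=%u idx=%u\n", dec.e.group - 1, dec.e.idx);
	return 0;
}

On 32-bit builds the removed code instead squeezed the pair into bitfields (IDX_WIDTH/GROUP_WIDTH) so the struct still fit within sizeof(void *), which is exactly what its BUILD_BUG_ON enforced.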