Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--  drivers/net/xen-netback/netback.c | 710
1 file changed, 353 insertions, 357 deletions
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 7367208ee8cd..a5484e8cb06e 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -70,33 +70,33 @@ unsigned int rx_drain_timeout_jiffies; | |||
70 | static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT; | 70 | static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT; |
71 | module_param(fatal_skb_slots, uint, 0444); | 71 | module_param(fatal_skb_slots, uint, 0444); |
72 | 72 | ||
73 | static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx, | 73 | static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, |
74 | u8 status); | 74 | u8 status); |
75 | 75 | ||
76 | static void make_tx_response(struct xenvif *vif, | 76 | static void make_tx_response(struct xenvif_queue *queue, |
77 | struct xen_netif_tx_request *txp, | 77 | struct xen_netif_tx_request *txp, |
78 | s8 st); | 78 | s8 st); |
79 | 79 | ||
80 | static inline int tx_work_todo(struct xenvif *vif); | 80 | static inline int tx_work_todo(struct xenvif_queue *queue); |
81 | static inline int rx_work_todo(struct xenvif *vif); | 81 | static inline int rx_work_todo(struct xenvif_queue *queue); |
82 | 82 | ||
83 | static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, | 83 | static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, |
84 | u16 id, | 84 | u16 id, |
85 | s8 st, | 85 | s8 st, |
86 | u16 offset, | 86 | u16 offset, |
87 | u16 size, | 87 | u16 size, |
88 | u16 flags); | 88 | u16 flags); |
89 | 89 | ||
90 | static inline unsigned long idx_to_pfn(struct xenvif *vif, | 90 | static inline unsigned long idx_to_pfn(struct xenvif_queue *queue, |
91 | u16 idx) | 91 | u16 idx) |
92 | { | 92 | { |
93 | return page_to_pfn(vif->mmap_pages[idx]); | 93 | return page_to_pfn(queue->mmap_pages[idx]); |
94 | } | 94 | } |
95 | 95 | ||
96 | static inline unsigned long idx_to_kaddr(struct xenvif *vif, | 96 | static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue, |
97 | u16 idx) | 97 | u16 idx) |
98 | { | 98 | { |
99 | return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx)); | 99 | return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx)); |
100 | } | 100 | } |
101 | 101 | ||
102 | #define callback_param(vif, pending_idx) \ | 102 | #define callback_param(vif, pending_idx) \ |
@@ -104,13 +104,13 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif, | |||
104 | 104 | ||
105 | /* Find the containing VIF's structure from a pointer in pending_tx_info array | 105 | /* Find the containing VIF's structure from a pointer in pending_tx_info array |
106 | */ | 106 | */ |
107 | static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf) | 107 | static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf) |
108 | { | 108 | { |
109 | u16 pending_idx = ubuf->desc; | 109 | u16 pending_idx = ubuf->desc; |
110 | struct pending_tx_info *temp = | 110 | struct pending_tx_info *temp = |
111 | container_of(ubuf, struct pending_tx_info, callback_struct); | 111 | container_of(ubuf, struct pending_tx_info, callback_struct); |
112 | return container_of(temp - pending_idx, | 112 | return container_of(temp - pending_idx, |
113 | struct xenvif, | 113 | struct xenvif_queue, |
114 | pending_tx_info[0]); | 114 | pending_tx_info[0]); |
115 | } | 115 | } |
116 | 116 | ||
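An aside, not part of the patch: ubuf_to_queue() in the hunk above works because callback_struct is embedded in pending_tx_info[pending_idx], and pending_tx_info[] is in turn embedded in struct xenvif_queue, so two container_of() steps (one per embedding, with the array pointer stepped back to element 0) recover the queue. Below is a minimal userspace model of that pointer arithmetic; ubuf_model, pending_model, queue_model and container_of_ are made-up stand-ins, not the kernel types.

#include <stddef.h>
#include <stdio.h>

struct ubuf_model { unsigned short desc; };        /* stands in for ubuf_info */
struct pending_model {                             /* stands in for pending_tx_info */
	int req;
	struct ubuf_model callback_struct;
};
#define NR_PENDING 8
struct queue_model {                               /* stands in for xenvif_queue */
	struct pending_model pending_tx_info[NR_PENDING];
};

#define container_of_(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct queue_model *ubuf_to_queue_model(const struct ubuf_model *ubuf)
{
	unsigned short pending_idx = ubuf->desc;
	struct pending_model *temp =
		container_of_(ubuf, struct pending_model, callback_struct);
	/* temp points at slot pending_idx; step back to slot 0, then up to the queue */
	return container_of_(temp - pending_idx, struct queue_model, pending_tx_info[0]);
}

int main(void)
{
	struct queue_model q;
	q.pending_tx_info[5].callback_struct.desc = 5;
	printf("%d\n", ubuf_to_queue_model(&q.pending_tx_info[5].callback_struct) == &q);
	return 0;
}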
@@ -136,24 +136,24 @@ static inline pending_ring_idx_t pending_index(unsigned i) | |||
136 | return i & (MAX_PENDING_REQS-1); | 136 | return i & (MAX_PENDING_REQS-1); |
137 | } | 137 | } |
138 | 138 | ||
139 | bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed) | 139 | bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed) |
140 | { | 140 | { |
141 | RING_IDX prod, cons; | 141 | RING_IDX prod, cons; |
142 | 142 | ||
143 | do { | 143 | do { |
144 | prod = vif->rx.sring->req_prod; | 144 | prod = queue->rx.sring->req_prod; |
145 | cons = vif->rx.req_cons; | 145 | cons = queue->rx.req_cons; |
146 | 146 | ||
147 | if (prod - cons >= needed) | 147 | if (prod - cons >= needed) |
148 | return true; | 148 | return true; |
149 | 149 | ||
150 | vif->rx.sring->req_event = prod + 1; | 150 | queue->rx.sring->req_event = prod + 1; |
151 | 151 | ||
152 | /* Make sure event is visible before we check prod | 152 | /* Make sure event is visible before we check prod |
153 | * again. | 153 | * again. |
154 | */ | 154 | */ |
155 | mb(); | 155 | mb(); |
156 | } while (vif->rx.sring->req_prod != prod); | 156 | } while (queue->rx.sring->req_prod != prod); |
157 | 157 | ||
158 | return false; | 158 | return false; |
159 | } | 159 | } |
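A side note on the hunk above (the logic is unchanged, only vif becomes queue): the loop is the usual "request an event, then re-read the producer" idiom. Writing req_event = prod + 1 asks the frontend to notify the backend once one more request is posted, and the barrier plus the re-read of req_prod close the window where requests arrived between the first read and the event write. A rough userspace model of the same control flow follows; ring_model and slots_available_model are invented names, and __sync_synchronize() stands in for mb().

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-in for the shared RX ring; only the fields used here. */
struct ring_model {
	unsigned int req_prod;   /* written by the frontend */
	unsigned int req_event;  /* backend sets this to ask for a notification */
	unsigned int req_cons;   /* backend's consumer index */
};

static bool slots_available_model(struct ring_model *r, unsigned int needed)
{
	unsigned int prod, cons;

	do {
		prod = r->req_prod;
		cons = r->req_cons;

		if (prod - cons >= needed)
			return true;

		/* Ask to be woken once one more request is posted ... */
		r->req_event = prod + 1;

		/* ... and make that visible before re-reading req_prod,
		 * so a request posted in the meantime is not missed. */
		__sync_synchronize();
	} while (r->req_prod != prod);

	return false;
}

int main(void)
{
	struct ring_model r = { .req_prod = 10, .req_cons = 4 };
	printf("%d %d\n", slots_available_model(&r, 6), slots_available_model(&r, 7));
	return 0;
}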
@@ -207,13 +207,13 @@ struct netrx_pending_operations { | |||
207 | grant_ref_t copy_gref; | 207 | grant_ref_t copy_gref; |
208 | }; | 208 | }; |
209 | 209 | ||
210 | static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif, | 210 | static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue, |
211 | struct netrx_pending_operations *npo) | 211 | struct netrx_pending_operations *npo) |
212 | { | 212 | { |
213 | struct xenvif_rx_meta *meta; | 213 | struct xenvif_rx_meta *meta; |
214 | struct xen_netif_rx_request *req; | 214 | struct xen_netif_rx_request *req; |
215 | 215 | ||
216 | req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); | 216 | req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); |
217 | 217 | ||
218 | meta = npo->meta + npo->meta_prod++; | 218 | meta = npo->meta + npo->meta_prod++; |
219 | meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; | 219 | meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; |
@@ -231,11 +231,11 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif, | |||
231 | * Set up the grant operations for this fragment. If it's a flipping | 231 | * Set up the grant operations for this fragment. If it's a flipping |
232 | * interface, we also set up the unmap request from here. | 232 | * interface, we also set up the unmap request from here. |
233 | */ | 233 | */ |
234 | static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, | 234 | static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb, |
235 | struct netrx_pending_operations *npo, | 235 | struct netrx_pending_operations *npo, |
236 | struct page *page, unsigned long size, | 236 | struct page *page, unsigned long size, |
237 | unsigned long offset, int *head, | 237 | unsigned long offset, int *head, |
238 | struct xenvif *foreign_vif, | 238 | struct xenvif_queue *foreign_queue, |
239 | grant_ref_t foreign_gref) | 239 | grant_ref_t foreign_gref) |
240 | { | 240 | { |
241 | struct gnttab_copy *copy_gop; | 241 | struct gnttab_copy *copy_gop; |
@@ -268,7 +268,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, | |||
268 | */ | 268 | */ |
269 | BUG_ON(*head); | 269 | BUG_ON(*head); |
270 | 270 | ||
271 | meta = get_next_rx_buffer(vif, npo); | 271 | meta = get_next_rx_buffer(queue, npo); |
272 | } | 272 | } |
273 | 273 | ||
274 | if (npo->copy_off + bytes > MAX_BUFFER_OFFSET) | 274 | if (npo->copy_off + bytes > MAX_BUFFER_OFFSET) |
@@ -278,8 +278,8 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, | |||
278 | copy_gop->flags = GNTCOPY_dest_gref; | 278 | copy_gop->flags = GNTCOPY_dest_gref; |
279 | copy_gop->len = bytes; | 279 | copy_gop->len = bytes; |
280 | 280 | ||
281 | if (foreign_vif) { | 281 | if (foreign_queue) { |
282 | copy_gop->source.domid = foreign_vif->domid; | 282 | copy_gop->source.domid = foreign_queue->vif->domid; |
283 | copy_gop->source.u.ref = foreign_gref; | 283 | copy_gop->source.u.ref = foreign_gref; |
284 | copy_gop->flags |= GNTCOPY_source_gref; | 284 | copy_gop->flags |= GNTCOPY_source_gref; |
285 | } else { | 285 | } else { |
@@ -289,7 +289,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, | |||
289 | } | 289 | } |
290 | copy_gop->source.offset = offset; | 290 | copy_gop->source.offset = offset; |
291 | 291 | ||
292 | copy_gop->dest.domid = vif->domid; | 292 | copy_gop->dest.domid = queue->vif->domid; |
293 | copy_gop->dest.offset = npo->copy_off; | 293 | copy_gop->dest.offset = npo->copy_off; |
294 | copy_gop->dest.u.ref = npo->copy_gref; | 294 | copy_gop->dest.u.ref = npo->copy_gref; |
295 | 295 | ||
@@ -314,8 +314,8 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, | |||
314 | gso_type = XEN_NETIF_GSO_TYPE_TCPV6; | 314 | gso_type = XEN_NETIF_GSO_TYPE_TCPV6; |
315 | } | 315 | } |
316 | 316 | ||
317 | if (*head && ((1 << gso_type) & vif->gso_mask)) | 317 | if (*head && ((1 << gso_type) & queue->vif->gso_mask)) |
318 | vif->rx.req_cons++; | 318 | queue->rx.req_cons++; |
319 | 319 | ||
320 | *head = 0; /* There must be something in this buffer now. */ | 320 | *head = 0; /* There must be something in this buffer now. */ |
321 | 321 | ||
@@ -337,13 +337,13 @@ static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb, | |||
337 | const int i, | 337 | const int i, |
338 | const struct ubuf_info *ubuf) | 338 | const struct ubuf_info *ubuf) |
339 | { | 339 | { |
340 | struct xenvif *foreign_vif = ubuf_to_vif(ubuf); | 340 | struct xenvif_queue *foreign_queue = ubuf_to_queue(ubuf); |
341 | 341 | ||
342 | do { | 342 | do { |
343 | u16 pending_idx = ubuf->desc; | 343 | u16 pending_idx = ubuf->desc; |
344 | 344 | ||
345 | if (skb_shinfo(skb)->frags[i].page.p == | 345 | if (skb_shinfo(skb)->frags[i].page.p == |
346 | foreign_vif->mmap_pages[pending_idx]) | 346 | foreign_queue->mmap_pages[pending_idx]) |
347 | break; | 347 | break; |
348 | ubuf = (struct ubuf_info *) ubuf->ctx; | 348 | ubuf = (struct ubuf_info *) ubuf->ctx; |
349 | } while (ubuf); | 349 | } while (ubuf); |
@@ -364,7 +364,8 @@ static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb, | |||
364 | * frontend-side LRO). | 364 | * frontend-side LRO). |
365 | */ | 365 | */ |
366 | static int xenvif_gop_skb(struct sk_buff *skb, | 366 | static int xenvif_gop_skb(struct sk_buff *skb, |
367 | struct netrx_pending_operations *npo) | 367 | struct netrx_pending_operations *npo, |
368 | struct xenvif_queue *queue) | ||
368 | { | 369 | { |
369 | struct xenvif *vif = netdev_priv(skb->dev); | 370 | struct xenvif *vif = netdev_priv(skb->dev); |
370 | int nr_frags = skb_shinfo(skb)->nr_frags; | 371 | int nr_frags = skb_shinfo(skb)->nr_frags; |
@@ -390,7 +391,7 @@ static int xenvif_gop_skb(struct sk_buff *skb, | |||
390 | 391 | ||
391 | /* Set up a GSO prefix descriptor, if necessary */ | 392 | /* Set up a GSO prefix descriptor, if necessary */ |
392 | if ((1 << gso_type) & vif->gso_prefix_mask) { | 393 | if ((1 << gso_type) & vif->gso_prefix_mask) { |
393 | req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); | 394 | req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); |
394 | meta = npo->meta + npo->meta_prod++; | 395 | meta = npo->meta + npo->meta_prod++; |
395 | meta->gso_type = gso_type; | 396 | meta->gso_type = gso_type; |
396 | meta->gso_size = skb_shinfo(skb)->gso_size; | 397 | meta->gso_size = skb_shinfo(skb)->gso_size; |
@@ -398,7 +399,7 @@ static int xenvif_gop_skb(struct sk_buff *skb, | |||
398 | meta->id = req->id; | 399 | meta->id = req->id; |
399 | } | 400 | } |
400 | 401 | ||
401 | req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); | 402 | req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); |
402 | meta = npo->meta + npo->meta_prod++; | 403 | meta = npo->meta + npo->meta_prod++; |
403 | 404 | ||
404 | if ((1 << gso_type) & vif->gso_mask) { | 405 | if ((1 << gso_type) & vif->gso_mask) { |
@@ -422,7 +423,7 @@ static int xenvif_gop_skb(struct sk_buff *skb, | |||
422 | if (data + len > skb_tail_pointer(skb)) | 423 | if (data + len > skb_tail_pointer(skb)) |
423 | len = skb_tail_pointer(skb) - data; | 424 | len = skb_tail_pointer(skb) - data; |
424 | 425 | ||
425 | xenvif_gop_frag_copy(vif, skb, npo, | 426 | xenvif_gop_frag_copy(queue, skb, npo, |
426 | virt_to_page(data), len, offset, &head, | 427 | virt_to_page(data), len, offset, &head, |
427 | NULL, | 428 | NULL, |
428 | 0); | 429 | 0); |
@@ -433,7 +434,7 @@ static int xenvif_gop_skb(struct sk_buff *skb, | |||
433 | /* This variable also signals whether foreign_gref has a real | 434 | /* This variable also signals whether foreign_gref has a real |
434 | * value or not. | 435 | * value or not. |
435 | */ | 436 | */ |
436 | struct xenvif *foreign_vif = NULL; | 437 | struct xenvif_queue *foreign_queue = NULL; |
437 | grant_ref_t foreign_gref; | 438 | grant_ref_t foreign_gref; |
438 | 439 | ||
439 | if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) && | 440 | if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) && |
@@ -458,8 +459,9 @@ static int xenvif_gop_skb(struct sk_buff *skb, | |||
458 | if (likely(ubuf)) { | 459 | if (likely(ubuf)) { |
459 | u16 pending_idx = ubuf->desc; | 460 | u16 pending_idx = ubuf->desc; |
460 | 461 | ||
461 | foreign_vif = ubuf_to_vif(ubuf); | 462 | foreign_queue = ubuf_to_queue(ubuf); |
462 | foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref; | 463 | foreign_gref = |
464 | foreign_queue->pending_tx_info[pending_idx].req.gref; | ||
463 | /* Just a safety measure. If this was the last | 465 | /* Just a safety measure. If this was the last |
464 | * element on the list, the for loop will | 466 | * element on the list, the for loop will |
465 | * iterate again if a local page were added to | 467 | * iterate again if a local page were added to |
@@ -477,13 +479,13 @@ static int xenvif_gop_skb(struct sk_buff *skb, | |||
477 | */ | 479 | */ |
478 | ubuf = head_ubuf; | 480 | ubuf = head_ubuf; |
479 | } | 481 | } |
480 | xenvif_gop_frag_copy(vif, skb, npo, | 482 | xenvif_gop_frag_copy(queue, skb, npo, |
481 | skb_frag_page(&skb_shinfo(skb)->frags[i]), | 483 | skb_frag_page(&skb_shinfo(skb)->frags[i]), |
482 | skb_frag_size(&skb_shinfo(skb)->frags[i]), | 484 | skb_frag_size(&skb_shinfo(skb)->frags[i]), |
483 | skb_shinfo(skb)->frags[i].page_offset, | 485 | skb_shinfo(skb)->frags[i].page_offset, |
484 | &head, | 486 | &head, |
485 | foreign_vif, | 487 | foreign_queue, |
486 | foreign_vif ? foreign_gref : UINT_MAX); | 488 | foreign_queue ? foreign_gref : UINT_MAX); |
487 | } | 489 | } |
488 | 490 | ||
489 | return npo->meta_prod - old_meta_prod; | 491 | return npo->meta_prod - old_meta_prod; |
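An aside on the hunks above, not part of the patch: when an skb forwarded between guests still carries SKBTX_DEV_ZEROCOPY frags, each frag's backing page belongs to some queue's pending slot, and the ubuf_info structures for those slots are chained through their ctx pointers; xenvif_find_gref() walks that chain until a slot's mmap page matches the frag's page, which is how foreign_queue and foreign_gref get picked. A small userspace model of such a walk is sketched below; ubuf_model, slot_pages and find_slot_for_page are invented stand-ins.

#include <stddef.h>
#include <stdio.h>

/* Toy model: a chain of per-slot descriptors linked through ->ctx,
 * each naming the page (here just an int id) that backs one frag. */
struct ubuf_model {
	unsigned short desc;          /* pending slot index */
	struct ubuf_model *ctx;       /* next descriptor in the chain */
};

static const struct ubuf_model *find_slot_for_page(const struct ubuf_model *ubuf,
						   const int *slot_pages,
						   int frag_page)
{
	do {
		if (slot_pages[ubuf->desc] == frag_page)
			return ubuf;          /* this slot backs the frag */
		ubuf = ubuf->ctx;
	} while (ubuf);
	return NULL;                          /* frag is a local page, not foreign */
}

int main(void)
{
	int slot_pages[4] = { 100, 101, 102, 103 };
	struct ubuf_model c = { 3, NULL }, b = { 1, &c }, a = { 0, &b };
	const struct ubuf_model *hit = find_slot_for_page(&a, slot_pages, 101);
	printf("%s\n", hit ? "found" : "not found");
	return 0;
}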
@@ -515,7 +517,7 @@ static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots, | |||
515 | return status; | 517 | return status; |
516 | } | 518 | } |
517 | 519 | ||
518 | static void xenvif_add_frag_responses(struct xenvif *vif, int status, | 520 | static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status, |
519 | struct xenvif_rx_meta *meta, | 521 | struct xenvif_rx_meta *meta, |
520 | int nr_meta_slots) | 522 | int nr_meta_slots) |
521 | { | 523 | { |
@@ -536,7 +538,7 @@ static void xenvif_add_frag_responses(struct xenvif *vif, int status, | |||
536 | flags = XEN_NETRXF_more_data; | 538 | flags = XEN_NETRXF_more_data; |
537 | 539 | ||
538 | offset = 0; | 540 | offset = 0; |
539 | make_rx_response(vif, meta[i].id, status, offset, | 541 | make_rx_response(queue, meta[i].id, status, offset, |
540 | meta[i].size, flags); | 542 | meta[i].size, flags); |
541 | } | 543 | } |
542 | } | 544 | } |
@@ -547,12 +549,12 @@ struct xenvif_rx_cb { | |||
547 | 549 | ||
548 | #define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb) | 550 | #define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb) |
549 | 551 | ||
550 | void xenvif_kick_thread(struct xenvif *vif) | 552 | void xenvif_kick_thread(struct xenvif_queue *queue) |
551 | { | 553 | { |
552 | wake_up(&vif->wq); | 554 | wake_up(&queue->wq); |
553 | } | 555 | } |
554 | 556 | ||
555 | static void xenvif_rx_action(struct xenvif *vif) | 557 | static void xenvif_rx_action(struct xenvif_queue *queue) |
556 | { | 558 | { |
557 | s8 status; | 559 | s8 status; |
558 | u16 flags; | 560 | u16 flags; |
@@ -565,13 +567,13 @@ static void xenvif_rx_action(struct xenvif *vif) | |||
565 | bool need_to_notify = false; | 567 | bool need_to_notify = false; |
566 | 568 | ||
567 | struct netrx_pending_operations npo = { | 569 | struct netrx_pending_operations npo = { |
568 | .copy = vif->grant_copy_op, | 570 | .copy = queue->grant_copy_op, |
569 | .meta = vif->meta, | 571 | .meta = queue->meta, |
570 | }; | 572 | }; |
571 | 573 | ||
572 | skb_queue_head_init(&rxq); | 574 | skb_queue_head_init(&rxq); |
573 | 575 | ||
574 | while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) { | 576 | while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) { |
575 | RING_IDX max_slots_needed; | 577 | RING_IDX max_slots_needed; |
576 | RING_IDX old_req_cons; | 578 | RING_IDX old_req_cons; |
577 | RING_IDX ring_slots_used; | 579 | RING_IDX ring_slots_used; |
@@ -614,42 +616,42 @@ static void xenvif_rx_action(struct xenvif *vif) | |||
614 | max_slots_needed++; | 616 | max_slots_needed++; |
615 | 617 | ||
616 | /* If the skb may not fit then bail out now */ | 618 | /* If the skb may not fit then bail out now */ |
617 | if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) { | 619 | if (!xenvif_rx_ring_slots_available(queue, max_slots_needed)) { |
618 | skb_queue_head(&vif->rx_queue, skb); | 620 | skb_queue_head(&queue->rx_queue, skb); |
619 | need_to_notify = true; | 621 | need_to_notify = true; |
620 | vif->rx_last_skb_slots = max_slots_needed; | 622 | queue->rx_last_skb_slots = max_slots_needed; |
621 | break; | 623 | break; |
622 | } else | 624 | } else |
623 | vif->rx_last_skb_slots = 0; | 625 | queue->rx_last_skb_slots = 0; |
624 | 626 | ||
625 | old_req_cons = vif->rx.req_cons; | 627 | old_req_cons = queue->rx.req_cons; |
626 | XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo); | 628 | XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue); |
627 | ring_slots_used = vif->rx.req_cons - old_req_cons; | 629 | ring_slots_used = queue->rx.req_cons - old_req_cons; |
628 | 630 | ||
629 | BUG_ON(ring_slots_used > max_slots_needed); | 631 | BUG_ON(ring_slots_used > max_slots_needed); |
630 | 632 | ||
631 | __skb_queue_tail(&rxq, skb); | 633 | __skb_queue_tail(&rxq, skb); |
632 | } | 634 | } |
633 | 635 | ||
634 | BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta)); | 636 | BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta)); |
635 | 637 | ||
636 | if (!npo.copy_prod) | 638 | if (!npo.copy_prod) |
637 | goto done; | 639 | goto done; |
638 | 640 | ||
639 | BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS); | 641 | BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS); |
640 | gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod); | 642 | gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod); |
641 | 643 | ||
642 | while ((skb = __skb_dequeue(&rxq)) != NULL) { | 644 | while ((skb = __skb_dequeue(&rxq)) != NULL) { |
643 | 645 | ||
644 | if ((1 << vif->meta[npo.meta_cons].gso_type) & | 646 | if ((1 << queue->meta[npo.meta_cons].gso_type) & |
645 | vif->gso_prefix_mask) { | 647 | queue->vif->gso_prefix_mask) { |
646 | resp = RING_GET_RESPONSE(&vif->rx, | 648 | resp = RING_GET_RESPONSE(&queue->rx, |
647 | vif->rx.rsp_prod_pvt++); | 649 | queue->rx.rsp_prod_pvt++); |
648 | 650 | ||
649 | resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data; | 651 | resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data; |
650 | 652 | ||
651 | resp->offset = vif->meta[npo.meta_cons].gso_size; | 653 | resp->offset = queue->meta[npo.meta_cons].gso_size; |
652 | resp->id = vif->meta[npo.meta_cons].id; | 654 | resp->id = queue->meta[npo.meta_cons].id; |
653 | resp->status = XENVIF_RX_CB(skb)->meta_slots_used; | 655 | resp->status = XENVIF_RX_CB(skb)->meta_slots_used; |
654 | 656 | ||
655 | npo.meta_cons++; | 657 | npo.meta_cons++; |
@@ -657,10 +659,10 @@ static void xenvif_rx_action(struct xenvif *vif) | |||
657 | } | 659 | } |
658 | 660 | ||
659 | 661 | ||
660 | vif->dev->stats.tx_bytes += skb->len; | 662 | queue->stats.tx_bytes += skb->len; |
661 | vif->dev->stats.tx_packets++; | 663 | queue->stats.tx_packets++; |
662 | 664 | ||
663 | status = xenvif_check_gop(vif, | 665 | status = xenvif_check_gop(queue->vif, |
664 | XENVIF_RX_CB(skb)->meta_slots_used, | 666 | XENVIF_RX_CB(skb)->meta_slots_used, |
665 | &npo); | 667 | &npo); |
666 | 668 | ||
@@ -676,22 +678,22 @@ static void xenvif_rx_action(struct xenvif *vif) | |||
676 | flags |= XEN_NETRXF_data_validated; | 678 | flags |= XEN_NETRXF_data_validated; |
677 | 679 | ||
678 | offset = 0; | 680 | offset = 0; |
679 | resp = make_rx_response(vif, vif->meta[npo.meta_cons].id, | 681 | resp = make_rx_response(queue, queue->meta[npo.meta_cons].id, |
680 | status, offset, | 682 | status, offset, |
681 | vif->meta[npo.meta_cons].size, | 683 | queue->meta[npo.meta_cons].size, |
682 | flags); | 684 | flags); |
683 | 685 | ||
684 | if ((1 << vif->meta[npo.meta_cons].gso_type) & | 686 | if ((1 << queue->meta[npo.meta_cons].gso_type) & |
685 | vif->gso_mask) { | 687 | queue->vif->gso_mask) { |
686 | struct xen_netif_extra_info *gso = | 688 | struct xen_netif_extra_info *gso = |
687 | (struct xen_netif_extra_info *) | 689 | (struct xen_netif_extra_info *) |
688 | RING_GET_RESPONSE(&vif->rx, | 690 | RING_GET_RESPONSE(&queue->rx, |
689 | vif->rx.rsp_prod_pvt++); | 691 | queue->rx.rsp_prod_pvt++); |
690 | 692 | ||
691 | resp->flags |= XEN_NETRXF_extra_info; | 693 | resp->flags |= XEN_NETRXF_extra_info; |
692 | 694 | ||
693 | gso->u.gso.type = vif->meta[npo.meta_cons].gso_type; | 695 | gso->u.gso.type = queue->meta[npo.meta_cons].gso_type; |
694 | gso->u.gso.size = vif->meta[npo.meta_cons].gso_size; | 696 | gso->u.gso.size = queue->meta[npo.meta_cons].gso_size; |
695 | gso->u.gso.pad = 0; | 697 | gso->u.gso.pad = 0; |
696 | gso->u.gso.features = 0; | 698 | gso->u.gso.features = 0; |
697 | 699 | ||
@@ -699,11 +701,11 @@ static void xenvif_rx_action(struct xenvif *vif) | |||
699 | gso->flags = 0; | 701 | gso->flags = 0; |
700 | } | 702 | } |
701 | 703 | ||
702 | xenvif_add_frag_responses(vif, status, | 704 | xenvif_add_frag_responses(queue, status, |
703 | vif->meta + npo.meta_cons + 1, | 705 | queue->meta + npo.meta_cons + 1, |
704 | XENVIF_RX_CB(skb)->meta_slots_used); | 706 | XENVIF_RX_CB(skb)->meta_slots_used); |
705 | 707 | ||
706 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret); | 708 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret); |
707 | 709 | ||
708 | need_to_notify |= !!ret; | 710 | need_to_notify |= !!ret; |
709 | 711 | ||
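To summarize the response layout this loop builds (an aside, not from the patch): each skb produces one RX response per meta slot; when the frontend negotiated GSO, the first response is followed immediately by an extra-info slot carrying the GSO type and size and gets XEN_NETRXF_extra_info set; the per-frag responses added by xenvif_add_frag_responses() carry XEN_NETRXF_more_data on all but the last. A toy enumeration of that slot sequence, with show_rx_slots an invented helper:

#include <stdio.h>
#include <stdbool.h>

/* Print the sequence of ring slots one skb would consume. */
static void show_rx_slots(int meta_slots_used, bool gso)
{
	printf("resp[0]: header data\n");
	if (gso)
		printf("extra  : gso type/size (XEN_NETRXF_extra_info set on resp[0])\n");
	for (int i = 1; i < meta_slots_used; i++)
		printf("resp[%d]: frag data%s\n", i,
		       i == meta_slots_used - 1 ? "" : " +more_data");
}

int main(void)
{
	show_rx_slots(3, true);   /* e.g. header slot + two frag slots, GSO packet */
	return 0;
}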
@@ -713,20 +715,20 @@ static void xenvif_rx_action(struct xenvif *vif) | |||
713 | 715 | ||
714 | done: | 716 | done: |
715 | if (need_to_notify) | 717 | if (need_to_notify) |
716 | notify_remote_via_irq(vif->rx_irq); | 718 | notify_remote_via_irq(queue->rx_irq); |
717 | } | 719 | } |
718 | 720 | ||
719 | void xenvif_napi_schedule_or_enable_events(struct xenvif *vif) | 721 | void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue) |
720 | { | 722 | { |
721 | int more_to_do; | 723 | int more_to_do; |
722 | 724 | ||
723 | RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do); | 725 | RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do); |
724 | 726 | ||
725 | if (more_to_do) | 727 | if (more_to_do) |
726 | napi_schedule(&vif->napi); | 728 | napi_schedule(&queue->napi); |
727 | } | 729 | } |
728 | 730 | ||
729 | static void tx_add_credit(struct xenvif *vif) | 731 | static void tx_add_credit(struct xenvif_queue *queue) |
730 | { | 732 | { |
731 | unsigned long max_burst, max_credit; | 733 | unsigned long max_burst, max_credit; |
732 | 734 | ||
@@ -734,55 +736,57 @@ static void tx_add_credit(struct xenvif *vif) | |||
734 | * Allow a burst big enough to transmit a jumbo packet of up to 128kB. | 736 | * Allow a burst big enough to transmit a jumbo packet of up to 128kB. |
735 | * Otherwise the interface can seize up due to insufficient credit. | 737 | * Otherwise the interface can seize up due to insufficient credit. |
736 | */ | 738 | */ |
737 | max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size; | 739 | max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size; |
738 | max_burst = min(max_burst, 131072UL); | 740 | max_burst = min(max_burst, 131072UL); |
739 | max_burst = max(max_burst, vif->credit_bytes); | 741 | max_burst = max(max_burst, queue->credit_bytes); |
740 | 742 | ||
741 | /* Take care that adding a new chunk of credit doesn't wrap to zero. */ | 743 | /* Take care that adding a new chunk of credit doesn't wrap to zero. */ |
742 | max_credit = vif->remaining_credit + vif->credit_bytes; | 744 | max_credit = queue->remaining_credit + queue->credit_bytes; |
743 | if (max_credit < vif->remaining_credit) | 745 | if (max_credit < queue->remaining_credit) |
744 | max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */ | 746 | max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */ |
745 | 747 | ||
746 | vif->remaining_credit = min(max_credit, max_burst); | 748 | queue->remaining_credit = min(max_credit, max_burst); |
747 | } | 749 | } |
748 | 750 | ||
749 | static void tx_credit_callback(unsigned long data) | 751 | static void tx_credit_callback(unsigned long data) |
750 | { | 752 | { |
751 | struct xenvif *vif = (struct xenvif *)data; | 753 | struct xenvif_queue *queue = (struct xenvif_queue *)data; |
752 | tx_add_credit(vif); | 754 | tx_add_credit(queue); |
753 | xenvif_napi_schedule_or_enable_events(vif); | 755 | xenvif_napi_schedule_or_enable_events(queue); |
754 | } | 756 | } |
755 | 757 | ||
756 | static void xenvif_tx_err(struct xenvif *vif, | 758 | static void xenvif_tx_err(struct xenvif_queue *queue, |
757 | struct xen_netif_tx_request *txp, RING_IDX end) | 759 | struct xen_netif_tx_request *txp, RING_IDX end) |
758 | { | 760 | { |
759 | RING_IDX cons = vif->tx.req_cons; | 761 | RING_IDX cons = queue->tx.req_cons; |
760 | unsigned long flags; | 762 | unsigned long flags; |
761 | 763 | ||
762 | do { | 764 | do { |
763 | spin_lock_irqsave(&vif->response_lock, flags); | 765 | spin_lock_irqsave(&queue->response_lock, flags); |
764 | make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); | 766 | make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); |
765 | spin_unlock_irqrestore(&vif->response_lock, flags); | 767 | spin_unlock_irqrestore(&queue->response_lock, flags); |
766 | if (cons == end) | 768 | if (cons == end) |
767 | break; | 769 | break; |
768 | txp = RING_GET_REQUEST(&vif->tx, cons++); | 770 | txp = RING_GET_REQUEST(&queue->tx, cons++); |
769 | } while (1); | 771 | } while (1); |
770 | vif->tx.req_cons = cons; | 772 | queue->tx.req_cons = cons; |
771 | } | 773 | } |
772 | 774 | ||
773 | static void xenvif_fatal_tx_err(struct xenvif *vif) | 775 | static void xenvif_fatal_tx_err(struct xenvif *vif) |
774 | { | 776 | { |
775 | netdev_err(vif->dev, "fatal error; disabling device\n"); | 777 | netdev_err(vif->dev, "fatal error; disabling device\n"); |
776 | vif->disabled = true; | 778 | vif->disabled = true; |
777 | xenvif_kick_thread(vif); | 779 | /* Disable the vif from queue 0's kthread */ |
780 | if (vif->queues) | ||
781 | xenvif_kick_thread(&vif->queues[0]); | ||
778 | } | 782 | } |
779 | 783 | ||
780 | static int xenvif_count_requests(struct xenvif *vif, | 784 | static int xenvif_count_requests(struct xenvif_queue *queue, |
781 | struct xen_netif_tx_request *first, | 785 | struct xen_netif_tx_request *first, |
782 | struct xen_netif_tx_request *txp, | 786 | struct xen_netif_tx_request *txp, |
783 | int work_to_do) | 787 | int work_to_do) |
784 | { | 788 | { |
785 | RING_IDX cons = vif->tx.req_cons; | 789 | RING_IDX cons = queue->tx.req_cons; |
786 | int slots = 0; | 790 | int slots = 0; |
787 | int drop_err = 0; | 791 | int drop_err = 0; |
788 | int more_data; | 792 | int more_data; |
@@ -794,10 +798,10 @@ static int xenvif_count_requests(struct xenvif *vif, | |||
794 | struct xen_netif_tx_request dropped_tx = { 0 }; | 798 | struct xen_netif_tx_request dropped_tx = { 0 }; |
795 | 799 | ||
796 | if (slots >= work_to_do) { | 800 | if (slots >= work_to_do) { |
797 | netdev_err(vif->dev, | 801 | netdev_err(queue->vif->dev, |
798 | "Asked for %d slots but exceeds this limit\n", | 802 | "Asked for %d slots but exceeds this limit\n", |
799 | work_to_do); | 803 | work_to_do); |
800 | xenvif_fatal_tx_err(vif); | 804 | xenvif_fatal_tx_err(queue->vif); |
801 | return -ENODATA; | 805 | return -ENODATA; |
802 | } | 806 | } |
803 | 807 | ||
@@ -805,10 +809,10 @@ static int xenvif_count_requests(struct xenvif *vif, | |||
805 | * considered malicious. | 809 | * considered malicious. |
806 | */ | 810 | */ |
807 | if (unlikely(slots >= fatal_skb_slots)) { | 811 | if (unlikely(slots >= fatal_skb_slots)) { |
808 | netdev_err(vif->dev, | 812 | netdev_err(queue->vif->dev, |
809 | "Malicious frontend using %d slots, threshold %u\n", | 813 | "Malicious frontend using %d slots, threshold %u\n", |
810 | slots, fatal_skb_slots); | 814 | slots, fatal_skb_slots); |
811 | xenvif_fatal_tx_err(vif); | 815 | xenvif_fatal_tx_err(queue->vif); |
812 | return -E2BIG; | 816 | return -E2BIG; |
813 | } | 817 | } |
814 | 818 | ||
@@ -821,7 +825,7 @@ static int xenvif_count_requests(struct xenvif *vif, | |||
821 | */ | 825 | */ |
822 | if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) { | 826 | if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) { |
823 | if (net_ratelimit()) | 827 | if (net_ratelimit()) |
824 | netdev_dbg(vif->dev, | 828 | netdev_dbg(queue->vif->dev, |
825 | "Too many slots (%d) exceeding limit (%d), dropping packet\n", | 829 | "Too many slots (%d) exceeding limit (%d), dropping packet\n", |
826 | slots, XEN_NETBK_LEGACY_SLOTS_MAX); | 830 | slots, XEN_NETBK_LEGACY_SLOTS_MAX); |
827 | drop_err = -E2BIG; | 831 | drop_err = -E2BIG; |
@@ -830,7 +834,7 @@ static int xenvif_count_requests(struct xenvif *vif, | |||
830 | if (drop_err) | 834 | if (drop_err) |
831 | txp = &dropped_tx; | 835 | txp = &dropped_tx; |
832 | 836 | ||
833 | memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots), | 837 | memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots), |
834 | sizeof(*txp)); | 838 | sizeof(*txp)); |
835 | 839 | ||
836 | /* If the guest submitted a frame >= 64 KiB then | 840 | /* If the guest submitted a frame >= 64 KiB then |
@@ -844,7 +848,7 @@ static int xenvif_count_requests(struct xenvif *vif, | |||
844 | */ | 848 | */ |
845 | if (!drop_err && txp->size > first->size) { | 849 | if (!drop_err && txp->size > first->size) { |
846 | if (net_ratelimit()) | 850 | if (net_ratelimit()) |
847 | netdev_dbg(vif->dev, | 851 | netdev_dbg(queue->vif->dev, |
848 | "Invalid tx request, slot size %u > remaining size %u\n", | 852 | "Invalid tx request, slot size %u > remaining size %u\n", |
849 | txp->size, first->size); | 853 | txp->size, first->size); |
850 | drop_err = -EIO; | 854 | drop_err = -EIO; |
@@ -854,9 +858,9 @@ static int xenvif_count_requests(struct xenvif *vif, | |||
854 | slots++; | 858 | slots++; |
855 | 859 | ||
856 | if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { | 860 | if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { |
857 | netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n", | 861 | netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n", |
858 | txp->offset, txp->size); | 862 | txp->offset, txp->size); |
859 | xenvif_fatal_tx_err(vif); | 863 | xenvif_fatal_tx_err(queue->vif); |
860 | return -EINVAL; | 864 | return -EINVAL; |
861 | } | 865 | } |
862 | 866 | ||
@@ -868,7 +872,7 @@ static int xenvif_count_requests(struct xenvif *vif, | |||
868 | } while (more_data); | 872 | } while (more_data); |
869 | 873 | ||
870 | if (drop_err) { | 874 | if (drop_err) { |
871 | xenvif_tx_err(vif, first, cons + slots); | 875 | xenvif_tx_err(queue, first, cons + slots); |
872 | return drop_err; | 876 | return drop_err; |
873 | } | 877 | } |
874 | 878 | ||
@@ -882,17 +886,17 @@ struct xenvif_tx_cb { | |||
882 | 886 | ||
883 | #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb) | 887 | #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb) |
884 | 888 | ||
885 | static inline void xenvif_tx_create_map_op(struct xenvif *vif, | 889 | static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue, |
886 | u16 pending_idx, | 890 | u16 pending_idx, |
887 | struct xen_netif_tx_request *txp, | 891 | struct xen_netif_tx_request *txp, |
888 | struct gnttab_map_grant_ref *mop) | 892 | struct gnttab_map_grant_ref *mop) |
889 | { | 893 | { |
890 | vif->pages_to_map[mop-vif->tx_map_ops] = vif->mmap_pages[pending_idx]; | 894 | queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx]; |
891 | gnttab_set_map_op(mop, idx_to_kaddr(vif, pending_idx), | 895 | gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx), |
892 | GNTMAP_host_map | GNTMAP_readonly, | 896 | GNTMAP_host_map | GNTMAP_readonly, |
893 | txp->gref, vif->domid); | 897 | txp->gref, queue->vif->domid); |
894 | 898 | ||
895 | memcpy(&vif->pending_tx_info[pending_idx].req, txp, | 899 | memcpy(&queue->pending_tx_info[pending_idx].req, txp, |
896 | sizeof(*txp)); | 900 | sizeof(*txp)); |
897 | } | 901 | } |
898 | 902 | ||
@@ -913,7 +917,7 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size) | |||
913 | return skb; | 917 | return skb; |
914 | } | 918 | } |
915 | 919 | ||
916 | static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif, | 920 | static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue, |
917 | struct sk_buff *skb, | 921 | struct sk_buff *skb, |
918 | struct xen_netif_tx_request *txp, | 922 | struct xen_netif_tx_request *txp, |
919 | struct gnttab_map_grant_ref *gop) | 923 | struct gnttab_map_grant_ref *gop) |
@@ -940,9 +944,9 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif, | |||
940 | 944 | ||
941 | for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots; | 945 | for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots; |
942 | shinfo->nr_frags++, txp++, gop++) { | 946 | shinfo->nr_frags++, txp++, gop++) { |
943 | index = pending_index(vif->pending_cons++); | 947 | index = pending_index(queue->pending_cons++); |
944 | pending_idx = vif->pending_ring[index]; | 948 | pending_idx = queue->pending_ring[index]; |
945 | xenvif_tx_create_map_op(vif, pending_idx, txp, gop); | 949 | xenvif_tx_create_map_op(queue, pending_idx, txp, gop); |
946 | frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); | 950 | frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); |
947 | } | 951 | } |
948 | 952 | ||
@@ -950,7 +954,7 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif, | |||
950 | struct sk_buff *nskb = xenvif_alloc_skb(0); | 954 | struct sk_buff *nskb = xenvif_alloc_skb(0); |
951 | if (unlikely(nskb == NULL)) { | 955 | if (unlikely(nskb == NULL)) { |
952 | if (net_ratelimit()) | 956 | if (net_ratelimit()) |
953 | netdev_err(vif->dev, | 957 | netdev_err(queue->vif->dev, |
954 | "Can't allocate the frag_list skb.\n"); | 958 | "Can't allocate the frag_list skb.\n"); |
955 | return NULL; | 959 | return NULL; |
956 | } | 960 | } |
@@ -960,9 +964,9 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif, | |||
960 | 964 | ||
961 | for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow; | 965 | for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow; |
962 | shinfo->nr_frags++, txp++, gop++) { | 966 | shinfo->nr_frags++, txp++, gop++) { |
963 | index = pending_index(vif->pending_cons++); | 967 | index = pending_index(queue->pending_cons++); |
964 | pending_idx = vif->pending_ring[index]; | 968 | pending_idx = queue->pending_ring[index]; |
965 | xenvif_tx_create_map_op(vif, pending_idx, txp, gop); | 969 | xenvif_tx_create_map_op(queue, pending_idx, txp, gop); |
966 | frag_set_pending_idx(&frags[shinfo->nr_frags], | 970 | frag_set_pending_idx(&frags[shinfo->nr_frags], |
967 | pending_idx); | 971 | pending_idx); |
968 | } | 972 | } |
@@ -973,34 +977,34 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif, | |||
973 | return gop; | 977 | return gop; |
974 | } | 978 | } |
975 | 979 | ||
976 | static inline void xenvif_grant_handle_set(struct xenvif *vif, | 980 | static inline void xenvif_grant_handle_set(struct xenvif_queue *queue, |
977 | u16 pending_idx, | 981 | u16 pending_idx, |
978 | grant_handle_t handle) | 982 | grant_handle_t handle) |
979 | { | 983 | { |
980 | if (unlikely(vif->grant_tx_handle[pending_idx] != | 984 | if (unlikely(queue->grant_tx_handle[pending_idx] != |
981 | NETBACK_INVALID_HANDLE)) { | 985 | NETBACK_INVALID_HANDLE)) { |
982 | netdev_err(vif->dev, | 986 | netdev_err(queue->vif->dev, |
983 | "Trying to overwrite active handle! pending_idx: %x\n", | 987 | "Trying to overwrite active handle! pending_idx: %x\n", |
984 | pending_idx); | 988 | pending_idx); |
985 | BUG(); | 989 | BUG(); |
986 | } | 990 | } |
987 | vif->grant_tx_handle[pending_idx] = handle; | 991 | queue->grant_tx_handle[pending_idx] = handle; |
988 | } | 992 | } |
989 | 993 | ||
990 | static inline void xenvif_grant_handle_reset(struct xenvif *vif, | 994 | static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue, |
991 | u16 pending_idx) | 995 | u16 pending_idx) |
992 | { | 996 | { |
993 | if (unlikely(vif->grant_tx_handle[pending_idx] == | 997 | if (unlikely(queue->grant_tx_handle[pending_idx] == |
994 | NETBACK_INVALID_HANDLE)) { | 998 | NETBACK_INVALID_HANDLE)) { |
995 | netdev_err(vif->dev, | 999 | netdev_err(queue->vif->dev, |
996 | "Trying to unmap invalid handle! pending_idx: %x\n", | 1000 | "Trying to unmap invalid handle! pending_idx: %x\n", |
997 | pending_idx); | 1001 | pending_idx); |
998 | BUG(); | 1002 | BUG(); |
999 | } | 1003 | } |
1000 | vif->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE; | 1004 | queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE; |
1001 | } | 1005 | } |
1002 | 1006 | ||
1003 | static int xenvif_tx_check_gop(struct xenvif *vif, | 1007 | static int xenvif_tx_check_gop(struct xenvif_queue *queue, |
1004 | struct sk_buff *skb, | 1008 | struct sk_buff *skb, |
1005 | struct gnttab_map_grant_ref **gopp_map, | 1009 | struct gnttab_map_grant_ref **gopp_map, |
1006 | struct gnttab_copy **gopp_copy) | 1010 | struct gnttab_copy **gopp_copy) |
@@ -1017,12 +1021,12 @@ static int xenvif_tx_check_gop(struct xenvif *vif, | |||
1017 | (*gopp_copy)++; | 1021 | (*gopp_copy)++; |
1018 | if (unlikely(err)) { | 1022 | if (unlikely(err)) { |
1019 | if (net_ratelimit()) | 1023 | if (net_ratelimit()) |
1020 | netdev_dbg(vif->dev, | 1024 | netdev_dbg(queue->vif->dev, |
1021 | "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n", | 1025 | "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n", |
1022 | (*gopp_copy)->status, | 1026 | (*gopp_copy)->status, |
1023 | pending_idx, | 1027 | pending_idx, |
1024 | (*gopp_copy)->source.u.ref); | 1028 | (*gopp_copy)->source.u.ref); |
1025 | xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); | 1029 | xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR); |
1026 | } | 1030 | } |
1027 | 1031 | ||
1028 | check_frags: | 1032 | check_frags: |
@@ -1035,24 +1039,24 @@ check_frags: | |||
1035 | newerr = gop_map->status; | 1039 | newerr = gop_map->status; |
1036 | 1040 | ||
1037 | if (likely(!newerr)) { | 1041 | if (likely(!newerr)) { |
1038 | xenvif_grant_handle_set(vif, | 1042 | xenvif_grant_handle_set(queue, |
1039 | pending_idx, | 1043 | pending_idx, |
1040 | gop_map->handle); | 1044 | gop_map->handle); |
1041 | /* Had a previous error? Invalidate this fragment. */ | 1045 | /* Had a previous error? Invalidate this fragment. */ |
1042 | if (unlikely(err)) | 1046 | if (unlikely(err)) |
1043 | xenvif_idx_unmap(vif, pending_idx); | 1047 | xenvif_idx_unmap(queue, pending_idx); |
1044 | continue; | 1048 | continue; |
1045 | } | 1049 | } |
1046 | 1050 | ||
1047 | /* Error on this fragment: respond to client with an error. */ | 1051 | /* Error on this fragment: respond to client with an error. */ |
1048 | if (net_ratelimit()) | 1052 | if (net_ratelimit()) |
1049 | netdev_dbg(vif->dev, | 1053 | netdev_dbg(queue->vif->dev, |
1050 | "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n", | 1054 | "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n", |
1051 | i, | 1055 | i, |
1052 | gop_map->status, | 1056 | gop_map->status, |
1053 | pending_idx, | 1057 | pending_idx, |
1054 | gop_map->ref); | 1058 | gop_map->ref); |
1055 | xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); | 1059 | xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR); |
1056 | 1060 | ||
1057 | /* Not the first error? Preceding frags already invalidated. */ | 1061 | /* Not the first error? Preceding frags already invalidated. */ |
1058 | if (err) | 1062 | if (err) |
@@ -1060,7 +1064,7 @@ check_frags: | |||
1060 | /* First error: invalidate preceding fragments. */ | 1064 | /* First error: invalidate preceding fragments. */ |
1061 | for (j = 0; j < i; j++) { | 1065 | for (j = 0; j < i; j++) { |
1062 | pending_idx = frag_get_pending_idx(&shinfo->frags[j]); | 1066 | pending_idx = frag_get_pending_idx(&shinfo->frags[j]); |
1063 | xenvif_idx_unmap(vif, pending_idx); | 1067 | xenvif_idx_unmap(queue, pending_idx); |
1064 | } | 1068 | } |
1065 | 1069 | ||
1066 | /* Remember the error: invalidate all subsequent fragments. */ | 1070 | /* Remember the error: invalidate all subsequent fragments. */ |
@@ -1084,7 +1088,7 @@ check_frags: | |||
1084 | shinfo = skb_shinfo(first_skb); | 1088 | shinfo = skb_shinfo(first_skb); |
1085 | for (j = 0; j < shinfo->nr_frags; j++) { | 1089 | for (j = 0; j < shinfo->nr_frags; j++) { |
1086 | pending_idx = frag_get_pending_idx(&shinfo->frags[j]); | 1090 | pending_idx = frag_get_pending_idx(&shinfo->frags[j]); |
1087 | xenvif_idx_unmap(vif, pending_idx); | 1091 | xenvif_idx_unmap(queue, pending_idx); |
1088 | } | 1092 | } |
1089 | } | 1093 | } |
1090 | 1094 | ||
@@ -1092,7 +1096,7 @@ check_frags: | |||
1092 | return err; | 1096 | return err; |
1093 | } | 1097 | } |
1094 | 1098 | ||
1095 | static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb) | 1099 | static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb) |
1096 | { | 1100 | { |
1097 | struct skb_shared_info *shinfo = skb_shinfo(skb); | 1101 | struct skb_shared_info *shinfo = skb_shinfo(skb); |
1098 | int nr_frags = shinfo->nr_frags; | 1102 | int nr_frags = shinfo->nr_frags; |
@@ -1110,23 +1114,23 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb) | |||
1110 | /* If this is not the first frag, chain it to the previous*/ | 1114 | /* If this is not the first frag, chain it to the previous*/ |
1111 | if (prev_pending_idx == INVALID_PENDING_IDX) | 1115 | if (prev_pending_idx == INVALID_PENDING_IDX) |
1112 | skb_shinfo(skb)->destructor_arg = | 1116 | skb_shinfo(skb)->destructor_arg = |
1113 | &callback_param(vif, pending_idx); | 1117 | &callback_param(queue, pending_idx); |
1114 | else | 1118 | else |
1115 | callback_param(vif, prev_pending_idx).ctx = | 1119 | callback_param(queue, prev_pending_idx).ctx = |
1116 | &callback_param(vif, pending_idx); | 1120 | &callback_param(queue, pending_idx); |
1117 | 1121 | ||
1118 | callback_param(vif, pending_idx).ctx = NULL; | 1122 | callback_param(queue, pending_idx).ctx = NULL; |
1119 | prev_pending_idx = pending_idx; | 1123 | prev_pending_idx = pending_idx; |
1120 | 1124 | ||
1121 | txp = &vif->pending_tx_info[pending_idx].req; | 1125 | txp = &queue->pending_tx_info[pending_idx].req; |
1122 | page = virt_to_page(idx_to_kaddr(vif, pending_idx)); | 1126 | page = virt_to_page(idx_to_kaddr(queue, pending_idx)); |
1123 | __skb_fill_page_desc(skb, i, page, txp->offset, txp->size); | 1127 | __skb_fill_page_desc(skb, i, page, txp->offset, txp->size); |
1124 | skb->len += txp->size; | 1128 | skb->len += txp->size; |
1125 | skb->data_len += txp->size; | 1129 | skb->data_len += txp->size; |
1126 | skb->truesize += txp->size; | 1130 | skb->truesize += txp->size; |
1127 | 1131 | ||
1128 | /* Take an extra reference to offset network stack's put_page */ | 1132 | /* Take an extra reference to offset network stack's put_page */ |
1129 | get_page(vif->mmap_pages[pending_idx]); | 1133 | get_page(queue->mmap_pages[pending_idx]); |
1130 | } | 1134 | } |
1131 | /* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc | 1135 | /* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc |
1132 | * overlaps with "index", and "mapping" is not set. I think mapping | 1136 | * overlaps with "index", and "mapping" is not set. I think mapping |
@@ -1136,33 +1140,33 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb) | |||
1136 | skb->pfmemalloc = false; | 1140 | skb->pfmemalloc = false; |
1137 | } | 1141 | } |
1138 | 1142 | ||
1139 | static int xenvif_get_extras(struct xenvif *vif, | 1143 | static int xenvif_get_extras(struct xenvif_queue *queue, |
1140 | struct xen_netif_extra_info *extras, | 1144 | struct xen_netif_extra_info *extras, |
1141 | int work_to_do) | 1145 | int work_to_do) |
1142 | { | 1146 | { |
1143 | struct xen_netif_extra_info extra; | 1147 | struct xen_netif_extra_info extra; |
1144 | RING_IDX cons = vif->tx.req_cons; | 1148 | RING_IDX cons = queue->tx.req_cons; |
1145 | 1149 | ||
1146 | do { | 1150 | do { |
1147 | if (unlikely(work_to_do-- <= 0)) { | 1151 | if (unlikely(work_to_do-- <= 0)) { |
1148 | netdev_err(vif->dev, "Missing extra info\n"); | 1152 | netdev_err(queue->vif->dev, "Missing extra info\n"); |
1149 | xenvif_fatal_tx_err(vif); | 1153 | xenvif_fatal_tx_err(queue->vif); |
1150 | return -EBADR; | 1154 | return -EBADR; |
1151 | } | 1155 | } |
1152 | 1156 | ||
1153 | memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons), | 1157 | memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons), |
1154 | sizeof(extra)); | 1158 | sizeof(extra)); |
1155 | if (unlikely(!extra.type || | 1159 | if (unlikely(!extra.type || |
1156 | extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { | 1160 | extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { |
1157 | vif->tx.req_cons = ++cons; | 1161 | queue->tx.req_cons = ++cons; |
1158 | netdev_err(vif->dev, | 1162 | netdev_err(queue->vif->dev, |
1159 | "Invalid extra type: %d\n", extra.type); | 1163 | "Invalid extra type: %d\n", extra.type); |
1160 | xenvif_fatal_tx_err(vif); | 1164 | xenvif_fatal_tx_err(queue->vif); |
1161 | return -EINVAL; | 1165 | return -EINVAL; |
1162 | } | 1166 | } |
1163 | 1167 | ||
1164 | memcpy(&extras[extra.type - 1], &extra, sizeof(extra)); | 1168 | memcpy(&extras[extra.type - 1], &extra, sizeof(extra)); |
1165 | vif->tx.req_cons = ++cons; | 1169 | queue->tx.req_cons = ++cons; |
1166 | } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE); | 1170 | } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE); |
1167 | 1171 | ||
1168 | return work_to_do; | 1172 | return work_to_do; |
@@ -1197,7 +1201,7 @@ static int xenvif_set_skb_gso(struct xenvif *vif, | |||
1197 | return 0; | 1201 | return 0; |
1198 | } | 1202 | } |
1199 | 1203 | ||
1200 | static int checksum_setup(struct xenvif *vif, struct sk_buff *skb) | 1204 | static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb) |
1201 | { | 1205 | { |
1202 | bool recalculate_partial_csum = false; | 1206 | bool recalculate_partial_csum = false; |
1203 | 1207 | ||
@@ -1207,7 +1211,7 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb) | |||
1207 | * recalculate the partial checksum. | 1211 | * recalculate the partial checksum. |
1208 | */ | 1212 | */ |
1209 | if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { | 1213 | if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { |
1210 | vif->rx_gso_checksum_fixup++; | 1214 | queue->stats.rx_gso_checksum_fixup++; |
1211 | skb->ip_summed = CHECKSUM_PARTIAL; | 1215 | skb->ip_summed = CHECKSUM_PARTIAL; |
1212 | recalculate_partial_csum = true; | 1216 | recalculate_partial_csum = true; |
1213 | } | 1217 | } |
@@ -1219,31 +1223,31 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb) | |||
1219 | return skb_checksum_setup(skb, recalculate_partial_csum); | 1223 | return skb_checksum_setup(skb, recalculate_partial_csum); |
1220 | } | 1224 | } |
1221 | 1225 | ||
1222 | static bool tx_credit_exceeded(struct xenvif *vif, unsigned size) | 1226 | static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size) |
1223 | { | 1227 | { |
1224 | u64 now = get_jiffies_64(); | 1228 | u64 now = get_jiffies_64(); |
1225 | u64 next_credit = vif->credit_window_start + | 1229 | u64 next_credit = queue->credit_window_start + |
1226 | msecs_to_jiffies(vif->credit_usec / 1000); | 1230 | msecs_to_jiffies(queue->credit_usec / 1000); |
1227 | 1231 | ||
1228 | /* Timer could already be pending in rare cases. */ | 1232 | /* Timer could already be pending in rare cases. */ |
1229 | if (timer_pending(&vif->credit_timeout)) | 1233 | if (timer_pending(&queue->credit_timeout)) |
1230 | return true; | 1234 | return true; |
1231 | 1235 | ||
1232 | /* Passed the point where we can replenish credit? */ | 1236 | /* Passed the point where we can replenish credit? */ |
1233 | if (time_after_eq64(now, next_credit)) { | 1237 | if (time_after_eq64(now, next_credit)) { |
1234 | vif->credit_window_start = now; | 1238 | queue->credit_window_start = now; |
1235 | tx_add_credit(vif); | 1239 | tx_add_credit(queue); |
1236 | } | 1240 | } |
1237 | 1241 | ||
1238 | /* Still too big to send right now? Set a callback. */ | 1242 | /* Still too big to send right now? Set a callback. */ |
1239 | if (size > vif->remaining_credit) { | 1243 | if (size > queue->remaining_credit) { |
1240 | vif->credit_timeout.data = | 1244 | queue->credit_timeout.data = |
1241 | (unsigned long)vif; | 1245 | (unsigned long)queue; |
1242 | vif->credit_timeout.function = | 1246 | queue->credit_timeout.function = |
1243 | tx_credit_callback; | 1247 | tx_credit_callback; |
1244 | mod_timer(&vif->credit_timeout, | 1248 | mod_timer(&queue->credit_timeout, |
1245 | next_credit); | 1249 | next_credit); |
1246 | vif->credit_window_start = next_credit; | 1250 | queue->credit_window_start = next_credit; |
1247 | 1251 | ||
1248 | return true; | 1252 | return true; |
1249 | } | 1253 | } |
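Stepping back (not part of the patch): tx_add_credit() and tx_credit_exceeded() implement a simple per-queue token bucket. credit_bytes are granted once per credit_usec window, packets spend remaining_credit, and a packet that cannot be afforded parks the queue behind the timer armed at next_credit, which refills the bucket. A compressed userspace model of just the accounting (no timer), with credit_model, add_credit and try_send as invented names:

#include <stdio.h>
#include <stdbool.h>

struct credit_model {
	unsigned long bytes_per_window;   /* credit_bytes */
	unsigned long remaining;          /* remaining_credit */
};

static void add_credit(struct credit_model *c, unsigned long next_pkt_size)
{
	/* Allow a burst big enough for a large packet (128kB cap in the driver),
	 * but never less than one window's worth of credit. */
	unsigned long max_burst = next_pkt_size;
	if (max_burst > 131072UL)
		max_burst = 131072UL;
	if (max_burst < c->bytes_per_window)
		max_burst = c->bytes_per_window;

	unsigned long max_credit = c->remaining + c->bytes_per_window;
	if (max_credit < c->remaining)            /* wrapped: clamp */
		max_credit = ~0UL;

	c->remaining = max_credit < max_burst ? max_credit : max_burst;
}

static bool try_send(struct credit_model *c, unsigned long size)
{
	if (size > c->remaining)
		return false;                     /* would arm the credit timer */
	c->remaining -= size;
	return true;
}

int main(void)
{
	struct credit_model c = { .bytes_per_window = 10000, .remaining = 10000 };
	printf("%d ", try_send(&c, 9000));        /* fits */
	printf("%d ", try_send(&c, 9000));        /* stalls until refill */
	add_credit(&c, 9000);                     /* window elapsed */
	printf("%d\n", try_send(&c, 9000));
	return 0;
}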
@@ -1251,16 +1255,16 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size) | |||
1251 | return false; | 1255 | return false; |
1252 | } | 1256 | } |
1253 | 1257 | ||
1254 | static void xenvif_tx_build_gops(struct xenvif *vif, | 1258 | static void xenvif_tx_build_gops(struct xenvif_queue *queue, |
1255 | int budget, | 1259 | int budget, |
1256 | unsigned *copy_ops, | 1260 | unsigned *copy_ops, |
1257 | unsigned *map_ops) | 1261 | unsigned *map_ops) |
1258 | { | 1262 | { |
1259 | struct gnttab_map_grant_ref *gop = vif->tx_map_ops, *request_gop; | 1263 | struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop; |
1260 | struct sk_buff *skb; | 1264 | struct sk_buff *skb; |
1261 | int ret; | 1265 | int ret; |
1262 | 1266 | ||
1263 | while (skb_queue_len(&vif->tx_queue) < budget) { | 1267 | while (skb_queue_len(&queue->tx_queue) < budget) { |
1264 | struct xen_netif_tx_request txreq; | 1268 | struct xen_netif_tx_request txreq; |
1265 | struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX]; | 1269 | struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX]; |
1266 | struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1]; | 1270 | struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1]; |
@@ -1270,69 +1274,69 @@ static void xenvif_tx_build_gops(struct xenvif *vif, | |||
1270 | unsigned int data_len; | 1274 | unsigned int data_len; |
1271 | pending_ring_idx_t index; | 1275 | pending_ring_idx_t index; |
1272 | 1276 | ||
1273 | if (vif->tx.sring->req_prod - vif->tx.req_cons > | 1277 | if (queue->tx.sring->req_prod - queue->tx.req_cons > |
1274 | XEN_NETIF_TX_RING_SIZE) { | 1278 | XEN_NETIF_TX_RING_SIZE) { |
1275 | netdev_err(vif->dev, | 1279 | netdev_err(queue->vif->dev, |
1276 | "Impossible number of requests. " | 1280 | "Impossible number of requests. " |
1277 | "req_prod %d, req_cons %d, size %ld\n", | 1281 | "req_prod %d, req_cons %d, size %ld\n", |
1278 | vif->tx.sring->req_prod, vif->tx.req_cons, | 1282 | queue->tx.sring->req_prod, queue->tx.req_cons, |
1279 | XEN_NETIF_TX_RING_SIZE); | 1283 | XEN_NETIF_TX_RING_SIZE); |
1280 | xenvif_fatal_tx_err(vif); | 1284 | xenvif_fatal_tx_err(queue->vif); |
1281 | break; | 1285 | break; |
1282 | } | 1286 | } |
1283 | 1287 | ||
1284 | work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx); | 1288 | work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx); |
1285 | if (!work_to_do) | 1289 | if (!work_to_do) |
1286 | break; | 1290 | break; |
1287 | 1291 | ||
1288 | idx = vif->tx.req_cons; | 1292 | idx = queue->tx.req_cons; |
1289 | rmb(); /* Ensure that we see the request before we copy it. */ | 1293 | rmb(); /* Ensure that we see the request before we copy it. */ |
1290 | memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq)); | 1294 | memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq)); |
1291 | 1295 | ||
1292 | /* Credit-based scheduling. */ | 1296 | /* Credit-based scheduling. */ |
1293 | if (txreq.size > vif->remaining_credit && | 1297 | if (txreq.size > queue->remaining_credit && |
1294 | tx_credit_exceeded(vif, txreq.size)) | 1298 | tx_credit_exceeded(queue, txreq.size)) |
1295 | break; | 1299 | break; |
1296 | 1300 | ||
1297 | vif->remaining_credit -= txreq.size; | 1301 | queue->remaining_credit -= txreq.size; |
1298 | 1302 | ||
1299 | work_to_do--; | 1303 | work_to_do--; |
1300 | vif->tx.req_cons = ++idx; | 1304 | queue->tx.req_cons = ++idx; |
1301 | 1305 | ||
1302 | memset(extras, 0, sizeof(extras)); | 1306 | memset(extras, 0, sizeof(extras)); |
1303 | if (txreq.flags & XEN_NETTXF_extra_info) { | 1307 | if (txreq.flags & XEN_NETTXF_extra_info) { |
1304 | work_to_do = xenvif_get_extras(vif, extras, | 1308 | work_to_do = xenvif_get_extras(queue, extras, |
1305 | work_to_do); | 1309 | work_to_do); |
1306 | idx = vif->tx.req_cons; | 1310 | idx = queue->tx.req_cons; |
1307 | if (unlikely(work_to_do < 0)) | 1311 | if (unlikely(work_to_do < 0)) |
1308 | break; | 1312 | break; |
1309 | } | 1313 | } |
1310 | 1314 | ||
1311 | ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do); | 1315 | ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do); |
1312 | if (unlikely(ret < 0)) | 1316 | if (unlikely(ret < 0)) |
1313 | break; | 1317 | break; |
1314 | 1318 | ||
1315 | idx += ret; | 1319 | idx += ret; |
1316 | 1320 | ||
1317 | if (unlikely(txreq.size < ETH_HLEN)) { | 1321 | if (unlikely(txreq.size < ETH_HLEN)) { |
1318 | netdev_dbg(vif->dev, | 1322 | netdev_dbg(queue->vif->dev, |
1319 | "Bad packet size: %d\n", txreq.size); | 1323 | "Bad packet size: %d\n", txreq.size); |
1320 | xenvif_tx_err(vif, &txreq, idx); | 1324 | xenvif_tx_err(queue, &txreq, idx); |
1321 | break; | 1325 | break; |
1322 | } | 1326 | } |
1323 | 1327 | ||
1324 | /* No crossing a page as the payload mustn't fragment. */ | 1328 | /* No crossing a page as the payload mustn't fragment. */ |
1325 | if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { | 1329 | if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { |
1326 | netdev_err(vif->dev, | 1330 | netdev_err(queue->vif->dev, |
1327 | "txreq.offset: %x, size: %u, end: %lu\n", | 1331 | "txreq.offset: %x, size: %u, end: %lu\n", |
1328 | txreq.offset, txreq.size, | 1332 | txreq.offset, txreq.size, |
1329 | (txreq.offset&~PAGE_MASK) + txreq.size); | 1333 | (txreq.offset&~PAGE_MASK) + txreq.size); |
1330 | xenvif_fatal_tx_err(vif); | 1334 | xenvif_fatal_tx_err(queue->vif); |
1331 | break; | 1335 | break; |
1332 | } | 1336 | } |
1333 | 1337 | ||
1334 | index = pending_index(vif->pending_cons); | 1338 | index = pending_index(queue->pending_cons); |
1335 | pending_idx = vif->pending_ring[index]; | 1339 | pending_idx = queue->pending_ring[index]; |
1336 | 1340 | ||
1337 | data_len = (txreq.size > PKT_PROT_LEN && | 1341 | data_len = (txreq.size > PKT_PROT_LEN && |
1338 | ret < XEN_NETBK_LEGACY_SLOTS_MAX) ? | 1342 | ret < XEN_NETBK_LEGACY_SLOTS_MAX) ? |
@@ -1340,9 +1344,9 @@ static void xenvif_tx_build_gops(struct xenvif *vif, | |||
1340 | 1344 | ||
1341 | skb = xenvif_alloc_skb(data_len); | 1345 | skb = xenvif_alloc_skb(data_len); |
1342 | if (unlikely(skb == NULL)) { | 1346 | if (unlikely(skb == NULL)) { |
1343 | netdev_dbg(vif->dev, | 1347 | netdev_dbg(queue->vif->dev, |
1344 | "Can't allocate a skb in start_xmit.\n"); | 1348 | "Can't allocate a skb in start_xmit.\n"); |
1345 | xenvif_tx_err(vif, &txreq, idx); | 1349 | xenvif_tx_err(queue, &txreq, idx); |
1346 | break; | 1350 | break; |
1347 | } | 1351 | } |
1348 | 1352 | ||
@@ -1350,7 +1354,7 @@ static void xenvif_tx_build_gops(struct xenvif *vif, | |||
1350 | struct xen_netif_extra_info *gso; | 1354 | struct xen_netif_extra_info *gso; |
1351 | gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; | 1355 | gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; |
1352 | 1356 | ||
1353 | if (xenvif_set_skb_gso(vif, skb, gso)) { | 1357 | if (xenvif_set_skb_gso(queue->vif, skb, gso)) { |
1354 | /* Failure in xenvif_set_skb_gso is fatal. */ | 1358 | /* Failure in xenvif_set_skb_gso is fatal. */ |
1355 | kfree_skb(skb); | 1359 | kfree_skb(skb); |
1356 | break; | 1360 | break; |
@@ -1360,18 +1364,18 @@ static void xenvif_tx_build_gops(struct xenvif *vif, | |||
1360 | XENVIF_TX_CB(skb)->pending_idx = pending_idx; | 1364 | XENVIF_TX_CB(skb)->pending_idx = pending_idx; |
1361 | 1365 | ||
1362 | __skb_put(skb, data_len); | 1366 | __skb_put(skb, data_len); |
1363 | vif->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref; | 1367 | queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref; |
1364 | vif->tx_copy_ops[*copy_ops].source.domid = vif->domid; | 1368 | queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid; |
1365 | vif->tx_copy_ops[*copy_ops].source.offset = txreq.offset; | 1369 | queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset; |
1366 | 1370 | ||
1367 | vif->tx_copy_ops[*copy_ops].dest.u.gmfn = | 1371 | queue->tx_copy_ops[*copy_ops].dest.u.gmfn = |
1368 | virt_to_mfn(skb->data); | 1372 | virt_to_mfn(skb->data); |
1369 | vif->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF; | 1373 | queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF; |
1370 | vif->tx_copy_ops[*copy_ops].dest.offset = | 1374 | queue->tx_copy_ops[*copy_ops].dest.offset = |
1371 | offset_in_page(skb->data); | 1375 | offset_in_page(skb->data); |
1372 | 1376 | ||
1373 | vif->tx_copy_ops[*copy_ops].len = data_len; | 1377 | queue->tx_copy_ops[*copy_ops].len = data_len; |
1374 | vif->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref; | 1378 | queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref; |
1375 | 1379 | ||
1376 | (*copy_ops)++; | 1380 | (*copy_ops)++; |
1377 | 1381 | ||
@@ -1380,42 +1384,42 @@ static void xenvif_tx_build_gops(struct xenvif *vif, | |||
1380 | skb_shinfo(skb)->nr_frags++; | 1384 | skb_shinfo(skb)->nr_frags++; |
1381 | frag_set_pending_idx(&skb_shinfo(skb)->frags[0], | 1385 | frag_set_pending_idx(&skb_shinfo(skb)->frags[0], |
1382 | pending_idx); | 1386 | pending_idx); |
1383 | xenvif_tx_create_map_op(vif, pending_idx, &txreq, gop); | 1387 | xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop); |
1384 | gop++; | 1388 | gop++; |
1385 | } else { | 1389 | } else { |
1386 | frag_set_pending_idx(&skb_shinfo(skb)->frags[0], | 1390 | frag_set_pending_idx(&skb_shinfo(skb)->frags[0], |
1387 | INVALID_PENDING_IDX); | 1391 | INVALID_PENDING_IDX); |
1388 | memcpy(&vif->pending_tx_info[pending_idx].req, &txreq, | 1392 | memcpy(&queue->pending_tx_info[pending_idx].req, &txreq, |
1389 | sizeof(txreq)); | 1393 | sizeof(txreq)); |
1390 | } | 1394 | } |
1391 | 1395 | ||
1392 | vif->pending_cons++; | 1396 | queue->pending_cons++; |
1393 | 1397 | ||
1394 | request_gop = xenvif_get_requests(vif, skb, txfrags, gop); | 1398 | request_gop = xenvif_get_requests(queue, skb, txfrags, gop); |
1395 | if (request_gop == NULL) { | 1399 | if (request_gop == NULL) { |
1396 | kfree_skb(skb); | 1400 | kfree_skb(skb); |
1397 | xenvif_tx_err(vif, &txreq, idx); | 1401 | xenvif_tx_err(queue, &txreq, idx); |
1398 | break; | 1402 | break; |
1399 | } | 1403 | } |
1400 | gop = request_gop; | 1404 | gop = request_gop; |
1401 | 1405 | ||
1402 | __skb_queue_tail(&vif->tx_queue, skb); | 1406 | __skb_queue_tail(&queue->tx_queue, skb); |
1403 | 1407 | ||
1404 | vif->tx.req_cons = idx; | 1408 | queue->tx.req_cons = idx; |
1405 | 1409 | ||
1406 | if (((gop-vif->tx_map_ops) >= ARRAY_SIZE(vif->tx_map_ops)) || | 1410 | if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) || |
1407 | (*copy_ops >= ARRAY_SIZE(vif->tx_copy_ops))) | 1411 | (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops))) |
1408 | break; | 1412 | break; |
1409 | } | 1413 | } |
1410 | 1414 | ||
1411 | (*map_ops) = gop - vif->tx_map_ops; | 1415 | (*map_ops) = gop - queue->tx_map_ops; |
1412 | return; | 1416 | return; |
1413 | } | 1417 | } |
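The split computed above (data_len) decides how much of the first slot is grant-copied into the locally allocated skb head and how much stays granted, to be grant-mapped later as frags. A minimal sketch of that decision follows; the constants are illustrative stand-ins, not the kernel's PKT_PROT_LEN or XEN_NETBK_LEGACY_SLOTS_MAX definitions.

#include <stddef.h>

#define TOY_PKT_PROT_LEN         128   /* assumed: bytes pulled into the skb head */
#define TOY_LEGACY_SLOTS_MAX      18   /* assumed per-packet slot limit */

/* How many bytes of the first request end up copied into the skb head;
 * anything past that is left for the grant-map path. */
static size_t head_copy_len(size_t first_req_size, int slots_used)
{
	if (first_req_size > TOY_PKT_PROT_LEN && slots_used < TOY_LEGACY_SLOTS_MAX)
		return TOY_PKT_PROT_LEN;
	return first_req_size;
}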
1414 | 1418 | ||
1415 | /* Consolidate skb with a frag_list into a brand new one with local pages on | 1419 | /* Consolidate skb with a frag_list into a brand new one with local pages on |
1416 | * frags. Returns 0 or -ENOMEM if can't allocate new pages. | 1420 | * frags. Returns 0 or -ENOMEM if can't allocate new pages. |
1417 | */ | 1421 | */ |
1418 | static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb) | 1422 | static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb) |
1419 | { | 1423 | { |
1420 | unsigned int offset = skb_headlen(skb); | 1424 | unsigned int offset = skb_headlen(skb); |
1421 | skb_frag_t frags[MAX_SKB_FRAGS]; | 1425 | skb_frag_t frags[MAX_SKB_FRAGS]; |
@@ -1423,10 +1427,10 @@ static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb) | |||
1423 | struct ubuf_info *uarg; | 1427 | struct ubuf_info *uarg; |
1424 | struct sk_buff *nskb = skb_shinfo(skb)->frag_list; | 1428 | struct sk_buff *nskb = skb_shinfo(skb)->frag_list; |
1425 | 1429 | ||
1426 | vif->tx_zerocopy_sent += 2; | 1430 | queue->stats.tx_zerocopy_sent += 2; |
1427 | vif->tx_frag_overflow++; | 1431 | queue->stats.tx_frag_overflow++; |
1428 | 1432 | ||
1429 | xenvif_fill_frags(vif, nskb); | 1433 | xenvif_fill_frags(queue, nskb); |
1430 | /* Subtract frags size, we will correct it later */ | 1434 | /* Subtract frags size, we will correct it later */ |
1431 | skb->truesize -= skb->data_len; | 1435 | skb->truesize -= skb->data_len; |
1432 | skb->len += nskb->len; | 1436 | skb->len += nskb->len; |
@@ -1478,37 +1482,37 @@ static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb) | |||
1478 | return 0; | 1482 | return 0; |
1479 | } | 1483 | } |
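xenvif_handle_frag_list copies a frag_list'd skb's data into freshly allocated local pages so the result fits a plain frags[] array. A rough userspace sketch of that consolidation idea, with malloc standing in for page allocation and a flat source buffer standing in for the scattered frags; everything here is illustrative, not the kernel implementation.

#include <stdlib.h>
#include <string.h>

#define TOY_PAGE_SIZE 4096

struct local_frag {
	void   *page;
	size_t  len;
};

/* Copy 'total' bytes into page-sized local buffers.  Returns the number of
 * frags filled, or -1 if an allocation fails (the caller unwinds, as the
 * -ENOMEM path above does). */
static int consolidate(struct local_frag *frags, int max_frags,
		       const unsigned char *src, size_t total)
{
	int n = 0;
	size_t off = 0;

	while (off < total && n < max_frags) {
		size_t chunk = total - off;

		if (chunk > TOY_PAGE_SIZE)
			chunk = TOY_PAGE_SIZE;
		frags[n].page = malloc(TOY_PAGE_SIZE);
		if (!frags[n].page)
			return -1;
		memcpy(frags[n].page, src + off, chunk);
		frags[n].len = chunk;
		off += chunk;
		n++;
	}
	return n;
}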
1480 | 1484 | ||
1481 | static int xenvif_tx_submit(struct xenvif *vif) | 1485 | static int xenvif_tx_submit(struct xenvif_queue *queue) |
1482 | { | 1486 | { |
1483 | struct gnttab_map_grant_ref *gop_map = vif->tx_map_ops; | 1487 | struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops; |
1484 | struct gnttab_copy *gop_copy = vif->tx_copy_ops; | 1488 | struct gnttab_copy *gop_copy = queue->tx_copy_ops; |
1485 | struct sk_buff *skb; | 1489 | struct sk_buff *skb; |
1486 | int work_done = 0; | 1490 | int work_done = 0; |
1487 | 1491 | ||
1488 | while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) { | 1492 | while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) { |
1489 | struct xen_netif_tx_request *txp; | 1493 | struct xen_netif_tx_request *txp; |
1490 | u16 pending_idx; | 1494 | u16 pending_idx; |
1491 | unsigned data_len; | 1495 | unsigned data_len; |
1492 | 1496 | ||
1493 | pending_idx = XENVIF_TX_CB(skb)->pending_idx; | 1497 | pending_idx = XENVIF_TX_CB(skb)->pending_idx; |
1494 | txp = &vif->pending_tx_info[pending_idx].req; | 1498 | txp = &queue->pending_tx_info[pending_idx].req; |
1495 | 1499 | ||
1496 | /* Check the remap error code. */ | 1500 | /* Check the remap error code. */ |
1497 | if (unlikely(xenvif_tx_check_gop(vif, skb, &gop_map, &gop_copy))) { | 1501 | if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) { |
1498 | skb_shinfo(skb)->nr_frags = 0; | 1502 | skb_shinfo(skb)->nr_frags = 0; |
1499 | kfree_skb(skb); | 1503 | kfree_skb(skb); |
1500 | continue; | 1504 | continue; |
1501 | } | 1505 | } |
1502 | 1506 | ||
1503 | data_len = skb->len; | 1507 | data_len = skb->len; |
1504 | callback_param(vif, pending_idx).ctx = NULL; | 1508 | callback_param(queue, pending_idx).ctx = NULL; |
1505 | if (data_len < txp->size) { | 1509 | if (data_len < txp->size) { |
1506 | /* Append the packet payload as a fragment. */ | 1510 | /* Append the packet payload as a fragment. */ |
1507 | txp->offset += data_len; | 1511 | txp->offset += data_len; |
1508 | txp->size -= data_len; | 1512 | txp->size -= data_len; |
1509 | } else { | 1513 | } else { |
1510 | /* Schedule a response immediately. */ | 1514 | /* Schedule a response immediately. */ |
1511 | xenvif_idx_release(vif, pending_idx, | 1515 | xenvif_idx_release(queue, pending_idx, |
1512 | XEN_NETIF_RSP_OKAY); | 1516 | XEN_NETIF_RSP_OKAY); |
1513 | } | 1517 | } |
1514 | 1518 | ||
@@ -1517,12 +1521,12 @@ static int xenvif_tx_submit(struct xenvif *vif) | |||
1517 | else if (txp->flags & XEN_NETTXF_data_validated) | 1521 | else if (txp->flags & XEN_NETTXF_data_validated) |
1518 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1522 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1519 | 1523 | ||
1520 | xenvif_fill_frags(vif, skb); | 1524 | xenvif_fill_frags(queue, skb); |
1521 | 1525 | ||
1522 | if (unlikely(skb_has_frag_list(skb))) { | 1526 | if (unlikely(skb_has_frag_list(skb))) { |
1523 | if (xenvif_handle_frag_list(vif, skb)) { | 1527 | if (xenvif_handle_frag_list(queue, skb)) { |
1524 | if (net_ratelimit()) | 1528 | if (net_ratelimit()) |
1525 | netdev_err(vif->dev, | 1529 | netdev_err(queue->vif->dev, |
1526 | "Not enough memory to consolidate frag_list!\n"); | 1530 | "Not enough memory to consolidate frag_list!\n"); |
1527 | skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; | 1531 | skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; |
1528 | kfree_skb(skb); | 1532 | kfree_skb(skb); |
@@ -1535,12 +1539,12 @@ static int xenvif_tx_submit(struct xenvif *vif) | |||
1535 | __pskb_pull_tail(skb, target - skb_headlen(skb)); | 1539 | __pskb_pull_tail(skb, target - skb_headlen(skb)); |
1536 | } | 1540 | } |
1537 | 1541 | ||
1538 | skb->dev = vif->dev; | 1542 | skb->dev = queue->vif->dev; |
1539 | skb->protocol = eth_type_trans(skb, skb->dev); | 1543 | skb->protocol = eth_type_trans(skb, skb->dev); |
1540 | skb_reset_network_header(skb); | 1544 | skb_reset_network_header(skb); |
1541 | 1545 | ||
1542 | if (checksum_setup(vif, skb)) { | 1546 | if (checksum_setup(queue, skb)) { |
1543 | netdev_dbg(vif->dev, | 1547 | netdev_dbg(queue->vif->dev, |
1544 | "Can't setup checksum in net_tx_action\n"); | 1548 | "Can't setup checksum in net_tx_action\n"); |
1545 | /* We have to set this flag to trigger the callback */ | 1549 | /* We have to set this flag to trigger the callback */ |
1546 | if (skb_shinfo(skb)->destructor_arg) | 1550 | if (skb_shinfo(skb)->destructor_arg) |
@@ -1565,8 +1569,8 @@ static int xenvif_tx_submit(struct xenvif *vif) | |||
1565 | DIV_ROUND_UP(skb->len - hdrlen, mss); | 1569 | DIV_ROUND_UP(skb->len - hdrlen, mss); |
1566 | } | 1570 | } |
1567 | 1571 | ||
1568 | vif->dev->stats.rx_bytes += skb->len; | 1572 | queue->stats.rx_bytes += skb->len; |
1569 | vif->dev->stats.rx_packets++; | 1573 | queue->stats.rx_packets++; |
1570 | 1574 | ||
1571 | work_done++; | 1575 | work_done++; |
1572 | 1576 | ||
@@ -1577,7 +1581,7 @@ static int xenvif_tx_submit(struct xenvif *vif) | |||
1577 | */ | 1581 | */ |
1578 | if (skb_shinfo(skb)->destructor_arg) { | 1582 | if (skb_shinfo(skb)->destructor_arg) { |
1579 | skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; | 1583 | skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; |
1580 | vif->tx_zerocopy_sent++; | 1584 | queue->stats.tx_zerocopy_sent++; |
1581 | } | 1585 | } |
1582 | 1586 | ||
1583 | netif_receive_skb(skb); | 1587 | netif_receive_skb(skb); |
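The submit path above translates the frontend's per-packet checksum flags into the skb checksum state (csum_blank becomes CHECKSUM_PARTIAL, data_validated becomes CHECKSUM_UNNECESSARY). A small sketch of that mapping, with illustrative constants rather than the real XEN_NETTXF_* and CHECKSUM_* values.

enum toy_csum { TOY_CSUM_NONE, TOY_CSUM_PARTIAL, TOY_CSUM_UNNECESSARY };

#define TOY_NETTXF_csum_blank     (1U << 0)  /* checksum still to be computed */
#define TOY_NETTXF_data_validated (1U << 1)  /* payload already validated */

static enum toy_csum toy_csum_state(unsigned int txflags)
{
	if (txflags & TOY_NETTXF_csum_blank)
		return TOY_CSUM_PARTIAL;        /* must be completed before delivery */
	if (txflags & TOY_NETTXF_data_validated)
		return TOY_CSUM_UNNECESSARY;
	return TOY_CSUM_NONE;
}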
@@ -1590,47 +1594,47 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success) | |||
1590 | { | 1594 | { |
1591 | unsigned long flags; | 1595 | unsigned long flags; |
1592 | pending_ring_idx_t index; | 1596 | pending_ring_idx_t index; |
1593 | struct xenvif *vif = ubuf_to_vif(ubuf); | 1597 | struct xenvif_queue *queue = ubuf_to_queue(ubuf); |
1594 | 1598 | ||
1595 | /* This is the only place where we grab this lock, to protect callbacks | 1599 | /* This is the only place where we grab this lock, to protect callbacks |
1596 | * from each other. | 1600 | * from each other. |
1597 | */ | 1601 | */ |
1598 | spin_lock_irqsave(&vif->callback_lock, flags); | 1602 | spin_lock_irqsave(&queue->callback_lock, flags); |
1599 | do { | 1603 | do { |
1600 | u16 pending_idx = ubuf->desc; | 1604 | u16 pending_idx = ubuf->desc; |
1601 | ubuf = (struct ubuf_info *) ubuf->ctx; | 1605 | ubuf = (struct ubuf_info *) ubuf->ctx; |
1602 | BUG_ON(vif->dealloc_prod - vif->dealloc_cons >= | 1606 | BUG_ON(queue->dealloc_prod - queue->dealloc_cons >= |
1603 | MAX_PENDING_REQS); | 1607 | MAX_PENDING_REQS); |
1604 | index = pending_index(vif->dealloc_prod); | 1608 | index = pending_index(queue->dealloc_prod); |
1605 | vif->dealloc_ring[index] = pending_idx; | 1609 | queue->dealloc_ring[index] = pending_idx; |
1606 | /* Sync with xenvif_tx_dealloc_action: | 1610 | /* Sync with xenvif_tx_dealloc_action: |
1607 | * insert idx then incr producer. | 1611 | * insert idx then incr producer. |
1608 | */ | 1612 | */ |
1609 | smp_wmb(); | 1613 | smp_wmb(); |
1610 | vif->dealloc_prod++; | 1614 | queue->dealloc_prod++; |
1611 | } while (ubuf); | 1615 | } while (ubuf); |
1612 | wake_up(&vif->dealloc_wq); | 1616 | wake_up(&queue->dealloc_wq); |
1613 | spin_unlock_irqrestore(&vif->callback_lock, flags); | 1617 | spin_unlock_irqrestore(&queue->callback_lock, flags); |
1614 | 1618 | ||
1615 | if (likely(zerocopy_success)) | 1619 | if (likely(zerocopy_success)) |
1616 | vif->tx_zerocopy_success++; | 1620 | queue->stats.tx_zerocopy_success++; |
1617 | else | 1621 | else |
1618 | vif->tx_zerocopy_fail++; | 1622 | queue->stats.tx_zerocopy_fail++; |
1619 | } | 1623 | } |
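The callback is the producer side of the dealloc ring: it stores the pending index into the slot first, then publishes it by bumping dealloc_prod after a write barrier, so the dealloc thread never sees an index whose slot is still unwritten. A userspace sketch of that pattern, using C11 atomics in place of smp_wmb() and an assumed power-of-two ring size; the types are stand-ins, not the driver's.

#include <stdatomic.h>
#include <stdint.h>

#define TOY_RING_SIZE 256   /* stand-in for MAX_PENDING_REQS */

struct toy_dealloc_ring {
	uint16_t     slots[TOY_RING_SIZE];
	atomic_uint  prod;   /* advanced by callbacks */
	unsigned int cons;   /* advanced only by the dealloc thread */
};

static void toy_dealloc_push(struct toy_dealloc_ring *r, uint16_t pending_idx)
{
	unsigned int p = atomic_load_explicit(&r->prod, memory_order_relaxed);

	r->slots[p % TOY_RING_SIZE] = pending_idx;
	/* release store: slot contents become visible before the new producer index */
	atomic_store_explicit(&r->prod, p + 1, memory_order_release);
}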
1620 | 1624 | ||
1621 | static inline void xenvif_tx_dealloc_action(struct xenvif *vif) | 1625 | static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue) |
1622 | { | 1626 | { |
1623 | struct gnttab_unmap_grant_ref *gop; | 1627 | struct gnttab_unmap_grant_ref *gop; |
1624 | pending_ring_idx_t dc, dp; | 1628 | pending_ring_idx_t dc, dp; |
1625 | u16 pending_idx, pending_idx_release[MAX_PENDING_REQS]; | 1629 | u16 pending_idx, pending_idx_release[MAX_PENDING_REQS]; |
1626 | unsigned int i = 0; | 1630 | unsigned int i = 0; |
1627 | 1631 | ||
1628 | dc = vif->dealloc_cons; | 1632 | dc = queue->dealloc_cons; |
1629 | gop = vif->tx_unmap_ops; | 1633 | gop = queue->tx_unmap_ops; |
1630 | 1634 | ||
1631 | /* Free up any grants we have finished using */ | 1635 | /* Free up any grants we have finished using */ |
1632 | do { | 1636 | do { |
1633 | dp = vif->dealloc_prod; | 1637 | dp = queue->dealloc_prod; |
1634 | 1638 | ||
1635 | /* Ensure we see all indices enqueued by all | 1639 | /* Ensure we see all indices enqueued by all |
1636 | * xenvif_zerocopy_callback(). | 1640 | * xenvif_zerocopy_callback(). |
@@ -1638,38 +1642,38 @@ static inline void xenvif_tx_dealloc_action(struct xenvif *vif) | |||
1638 | smp_rmb(); | 1642 | smp_rmb(); |
1639 | 1643 | ||
1640 | while (dc != dp) { | 1644 | while (dc != dp) { |
1641 | BUG_ON(gop - vif->tx_unmap_ops > MAX_PENDING_REQS); | 1645 | BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS); |
1642 | pending_idx = | 1646 | pending_idx = |
1643 | vif->dealloc_ring[pending_index(dc++)]; | 1647 | queue->dealloc_ring[pending_index(dc++)]; |
1644 | 1648 | ||
1645 | pending_idx_release[gop-vif->tx_unmap_ops] = | 1649 | pending_idx_release[gop-queue->tx_unmap_ops] = |
1646 | pending_idx; | 1650 | pending_idx; |
1647 | vif->pages_to_unmap[gop-vif->tx_unmap_ops] = | 1651 | queue->pages_to_unmap[gop-queue->tx_unmap_ops] = |
1648 | vif->mmap_pages[pending_idx]; | 1652 | queue->mmap_pages[pending_idx]; |
1649 | gnttab_set_unmap_op(gop, | 1653 | gnttab_set_unmap_op(gop, |
1650 | idx_to_kaddr(vif, pending_idx), | 1654 | idx_to_kaddr(queue, pending_idx), |
1651 | GNTMAP_host_map, | 1655 | GNTMAP_host_map, |
1652 | vif->grant_tx_handle[pending_idx]); | 1656 | queue->grant_tx_handle[pending_idx]); |
1653 | xenvif_grant_handle_reset(vif, pending_idx); | 1657 | xenvif_grant_handle_reset(queue, pending_idx); |
1654 | ++gop; | 1658 | ++gop; |
1655 | } | 1659 | } |
1656 | 1660 | ||
1657 | } while (dp != vif->dealloc_prod); | 1661 | } while (dp != queue->dealloc_prod); |
1658 | 1662 | ||
1659 | vif->dealloc_cons = dc; | 1663 | queue->dealloc_cons = dc; |
1660 | 1664 | ||
1661 | if (gop - vif->tx_unmap_ops > 0) { | 1665 | if (gop - queue->tx_unmap_ops > 0) { |
1662 | int ret; | 1666 | int ret; |
1663 | ret = gnttab_unmap_refs(vif->tx_unmap_ops, | 1667 | ret = gnttab_unmap_refs(queue->tx_unmap_ops, |
1664 | NULL, | 1668 | NULL, |
1665 | vif->pages_to_unmap, | 1669 | queue->pages_to_unmap, |
1666 | gop - vif->tx_unmap_ops); | 1670 | gop - queue->tx_unmap_ops); |
1667 | if (ret) { | 1671 | if (ret) { |
1668 | netdev_err(vif->dev, "Unmap fail: nr_ops %tx ret %d\n", | 1672 | netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tx ret %d\n", |
1669 | gop - vif->tx_unmap_ops, ret); | 1673 | gop - queue->tx_unmap_ops, ret); |
1670 | for (i = 0; i < gop - vif->tx_unmap_ops; ++i) { | 1674 | for (i = 0; i < gop - queue->tx_unmap_ops; ++i) { |
1671 | if (gop[i].status != GNTST_okay) | 1675 | if (gop[i].status != GNTST_okay) |
1672 | netdev_err(vif->dev, | 1676 | netdev_err(queue->vif->dev, |
1673 | " host_addr: %llx handle: %x status: %d\n", | 1677 | " host_addr: %llx handle: %x status: %d\n", |
1674 | gop[i].host_addr, | 1678 | gop[i].host_addr, |
1675 | gop[i].handle, | 1679 | gop[i].handle, |
@@ -1679,91 +1683,91 @@ static inline void xenvif_tx_dealloc_action(struct xenvif *vif) | |||
1679 | } | 1683 | } |
1680 | } | 1684 | } |
1681 | 1685 | ||
1682 | for (i = 0; i < gop - vif->tx_unmap_ops; ++i) | 1686 | for (i = 0; i < gop - queue->tx_unmap_ops; ++i) |
1683 | xenvif_idx_release(vif, pending_idx_release[i], | 1687 | xenvif_idx_release(queue, pending_idx_release[i], |
1684 | XEN_NETIF_RSP_OKAY); | 1688 | XEN_NETIF_RSP_OKAY); |
1685 | } | 1689 | } |
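xenvif_tx_dealloc_action is the matching consumer: it samples dealloc_prod (with smp_rmb() ordering the reads), drains every published slot, and only then issues the batched unmap. A sketch of that consumer side, reusing the toy ring from the previous sketch and an acquire load in place of smp_rmb().

static int toy_dealloc_drain(struct toy_dealloc_ring *r, uint16_t *out, int max)
{
	unsigned int p = atomic_load_explicit(&r->prod, memory_order_acquire);
	int n = 0;

	while (r->cons != p && n < max) {
		out[n] = r->slots[r->cons % TOY_RING_SIZE];
		r->cons++;
		n++;
	}
	return n;   /* caller then unmaps these grants and releases the slots */
}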
1686 | 1690 | ||
1687 | 1691 | ||
1688 | /* Called after netfront has transmitted */ | 1692 | /* Called after netfront has transmitted */ |
1689 | int xenvif_tx_action(struct xenvif *vif, int budget) | 1693 | int xenvif_tx_action(struct xenvif_queue *queue, int budget) |
1690 | { | 1694 | { |
1691 | unsigned nr_mops, nr_cops = 0; | 1695 | unsigned nr_mops, nr_cops = 0; |
1692 | int work_done, ret; | 1696 | int work_done, ret; |
1693 | 1697 | ||
1694 | if (unlikely(!tx_work_todo(vif))) | 1698 | if (unlikely(!tx_work_todo(queue))) |
1695 | return 0; | 1699 | return 0; |
1696 | 1700 | ||
1697 | xenvif_tx_build_gops(vif, budget, &nr_cops, &nr_mops); | 1701 | xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops); |
1698 | 1702 | ||
1699 | if (nr_cops == 0) | 1703 | if (nr_cops == 0) |
1700 | return 0; | 1704 | return 0; |
1701 | 1705 | ||
1702 | gnttab_batch_copy(vif->tx_copy_ops, nr_cops); | 1706 | gnttab_batch_copy(queue->tx_copy_ops, nr_cops); |
1703 | if (nr_mops != 0) { | 1707 | if (nr_mops != 0) { |
1704 | ret = gnttab_map_refs(vif->tx_map_ops, | 1708 | ret = gnttab_map_refs(queue->tx_map_ops, |
1705 | NULL, | 1709 | NULL, |
1706 | vif->pages_to_map, | 1710 | queue->pages_to_map, |
1707 | nr_mops); | 1711 | nr_mops); |
1708 | BUG_ON(ret); | 1712 | BUG_ON(ret); |
1709 | } | 1713 | } |
1710 | 1714 | ||
1711 | work_done = xenvif_tx_submit(vif); | 1715 | work_done = xenvif_tx_submit(queue); |
1712 | 1716 | ||
1713 | return work_done; | 1717 | return work_done; |
1714 | } | 1718 | } |
1715 | 1719 | ||
1716 | static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx, | 1720 | static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, |
1717 | u8 status) | 1721 | u8 status) |
1718 | { | 1722 | { |
1719 | struct pending_tx_info *pending_tx_info; | 1723 | struct pending_tx_info *pending_tx_info; |
1720 | pending_ring_idx_t index; | 1724 | pending_ring_idx_t index; |
1721 | unsigned long flags; | 1725 | unsigned long flags; |
1722 | 1726 | ||
1723 | pending_tx_info = &vif->pending_tx_info[pending_idx]; | 1727 | pending_tx_info = &queue->pending_tx_info[pending_idx]; |
1724 | spin_lock_irqsave(&vif->response_lock, flags); | 1728 | spin_lock_irqsave(&queue->response_lock, flags); |
1725 | make_tx_response(vif, &pending_tx_info->req, status); | 1729 | make_tx_response(queue, &pending_tx_info->req, status); |
1726 | index = pending_index(vif->pending_prod); | 1730 | index = pending_index(queue->pending_prod); |
1727 | vif->pending_ring[index] = pending_idx; | 1731 | queue->pending_ring[index] = pending_idx; |
1728 | /* TX shouldn't use the index before we give it back here */ | 1732 | /* TX shouldn't use the index before we give it back here */ |
1729 | mb(); | 1733 | mb(); |
1730 | vif->pending_prod++; | 1734 | queue->pending_prod++; |
1731 | spin_unlock_irqrestore(&vif->response_lock, flags); | 1735 | spin_unlock_irqrestore(&queue->response_lock, flags); |
1732 | } | 1736 | } |
1733 | 1737 | ||
1734 | 1738 | ||
1735 | static void make_tx_response(struct xenvif *vif, | 1739 | static void make_tx_response(struct xenvif_queue *queue, |
1736 | struct xen_netif_tx_request *txp, | 1740 | struct xen_netif_tx_request *txp, |
1737 | s8 st) | 1741 | s8 st) |
1738 | { | 1742 | { |
1739 | RING_IDX i = vif->tx.rsp_prod_pvt; | 1743 | RING_IDX i = queue->tx.rsp_prod_pvt; |
1740 | struct xen_netif_tx_response *resp; | 1744 | struct xen_netif_tx_response *resp; |
1741 | int notify; | 1745 | int notify; |
1742 | 1746 | ||
1743 | resp = RING_GET_RESPONSE(&vif->tx, i); | 1747 | resp = RING_GET_RESPONSE(&queue->tx, i); |
1744 | resp->id = txp->id; | 1748 | resp->id = txp->id; |
1745 | resp->status = st; | 1749 | resp->status = st; |
1746 | 1750 | ||
1747 | if (txp->flags & XEN_NETTXF_extra_info) | 1751 | if (txp->flags & XEN_NETTXF_extra_info) |
1748 | RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL; | 1752 | RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; |
1749 | 1753 | ||
1750 | vif->tx.rsp_prod_pvt = ++i; | 1754 | queue->tx.rsp_prod_pvt = ++i; |
1751 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify); | 1755 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); |
1752 | if (notify) | 1756 | if (notify) |
1753 | notify_remote_via_irq(vif->tx_irq); | 1757 | notify_remote_via_irq(queue->tx_irq); |
1754 | } | 1758 | } |
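make_tx_response produces one status slot per request id, burns one extra slot with a NULL response when the request carried extra info, and notifies the frontend only if its event index lies in the range just produced. A toy model of that bookkeeping; the structure below is a simplified stand-in, not the Xen shared-ring ABI or its macros.

#define TOY_RING_SLOTS 256
#define TOY_RSP_NULL     1

struct toy_tx_ring {
	struct { unsigned short id; short status; } rsp[TOY_RING_SLOTS];
	unsigned int rsp_prod_pvt;   /* backend-private producer */
	unsigned int rsp_prod;       /* producer published to the frontend */
	unsigned int rsp_event;      /* frontend wants a kick past this index */
};

/* Returns non-zero if the frontend should be notified. */
static int toy_make_tx_response(struct toy_tx_ring *r, unsigned short id,
				short status, int had_extra_info)
{
	unsigned int old = r->rsp_prod;
	unsigned int i = r->rsp_prod_pvt;

	r->rsp[i % TOY_RING_SLOTS].id = id;
	r->rsp[i % TOY_RING_SLOTS].status = status;
	i++;
	if (had_extra_info) {
		r->rsp[i % TOY_RING_SLOTS].status = TOY_RSP_NULL; /* padding slot */
		i++;
	}

	r->rsp_prod_pvt = i;
	r->rsp_prod = i;                       /* publish */
	/* unsigned wrap-around test, same spirit as the ring push macro */
	return (i - r->rsp_event) < (i - old);
}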
1755 | 1759 | ||
1756 | static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, | 1760 | static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, |
1757 | u16 id, | 1761 | u16 id, |
1758 | s8 st, | 1762 | s8 st, |
1759 | u16 offset, | 1763 | u16 offset, |
1760 | u16 size, | 1764 | u16 size, |
1761 | u16 flags) | 1765 | u16 flags) |
1762 | { | 1766 | { |
1763 | RING_IDX i = vif->rx.rsp_prod_pvt; | 1767 | RING_IDX i = queue->rx.rsp_prod_pvt; |
1764 | struct xen_netif_rx_response *resp; | 1768 | struct xen_netif_rx_response *resp; |
1765 | 1769 | ||
1766 | resp = RING_GET_RESPONSE(&vif->rx, i); | 1770 | resp = RING_GET_RESPONSE(&queue->rx, i); |
1767 | resp->offset = offset; | 1771 | resp->offset = offset; |
1768 | resp->flags = flags; | 1772 | resp->flags = flags; |
1769 | resp->id = id; | 1773 | resp->id = id; |
@@ -1771,26 +1775,26 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, | |||
1771 | if (st < 0) | 1775 | if (st < 0) |
1772 | resp->status = (s16)st; | 1776 | resp->status = (s16)st; |
1773 | 1777 | ||
1774 | vif->rx.rsp_prod_pvt = ++i; | 1778 | queue->rx.rsp_prod_pvt = ++i; |
1775 | 1779 | ||
1776 | return resp; | 1780 | return resp; |
1777 | } | 1781 | } |
1778 | 1782 | ||
1779 | void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx) | 1783 | void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx) |
1780 | { | 1784 | { |
1781 | int ret; | 1785 | int ret; |
1782 | struct gnttab_unmap_grant_ref tx_unmap_op; | 1786 | struct gnttab_unmap_grant_ref tx_unmap_op; |
1783 | 1787 | ||
1784 | gnttab_set_unmap_op(&tx_unmap_op, | 1788 | gnttab_set_unmap_op(&tx_unmap_op, |
1785 | idx_to_kaddr(vif, pending_idx), | 1789 | idx_to_kaddr(queue, pending_idx), |
1786 | GNTMAP_host_map, | 1790 | GNTMAP_host_map, |
1787 | vif->grant_tx_handle[pending_idx]); | 1791 | queue->grant_tx_handle[pending_idx]); |
1788 | xenvif_grant_handle_reset(vif, pending_idx); | 1792 | xenvif_grant_handle_reset(queue, pending_idx); |
1789 | 1793 | ||
1790 | ret = gnttab_unmap_refs(&tx_unmap_op, NULL, | 1794 | ret = gnttab_unmap_refs(&tx_unmap_op, NULL, |
1791 | &vif->mmap_pages[pending_idx], 1); | 1795 | &queue->mmap_pages[pending_idx], 1); |
1792 | if (ret) { | 1796 | if (ret) { |
1793 | netdev_err(vif->dev, | 1797 | netdev_err(queue->vif->dev, |
1794 | "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n", | 1798 | "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n", |
1795 | ret, | 1799 | ret, |
1796 | pending_idx, | 1800 | pending_idx, |
@@ -1800,41 +1804,40 @@ void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx) | |||
1800 | BUG(); | 1804 | BUG(); |
1801 | } | 1805 | } |
1802 | 1806 | ||
1803 | xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY); | 1807 | xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY); |
1804 | } | 1808 | } |
1805 | 1809 | ||
1806 | static inline int rx_work_todo(struct xenvif *vif) | 1810 | static inline int rx_work_todo(struct xenvif_queue *queue) |
1807 | { | 1811 | { |
1808 | return (!skb_queue_empty(&vif->rx_queue) && | 1812 | return (!skb_queue_empty(&queue->rx_queue) && |
1809 | xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots)) || | 1813 | xenvif_rx_ring_slots_available(queue, queue->rx_last_skb_slots)) || |
1810 | vif->rx_queue_purge; | 1814 | queue->rx_queue_purge; |
1811 | } | 1815 | } |
1812 | 1816 | ||
1813 | static inline int tx_work_todo(struct xenvif *vif) | 1817 | static inline int tx_work_todo(struct xenvif_queue *queue) |
1814 | { | 1818 | { |
1815 | | ||
1816 | if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx))) | 1819 | if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))) |

1817 | return 1; | 1820 | return 1; |
1818 | 1821 | ||
1819 | return 0; | 1822 | return 0; |
1820 | } | 1823 | } |
1821 | 1824 | ||
1822 | static inline bool tx_dealloc_work_todo(struct xenvif *vif) | 1825 | static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue) |
1823 | { | 1826 | { |
1824 | return vif->dealloc_cons != vif->dealloc_prod; | 1827 | return queue->dealloc_cons != queue->dealloc_prod; |
1825 | } | 1828 | } |
1826 | 1829 | ||
1827 | void xenvif_unmap_frontend_rings(struct xenvif *vif) | 1830 | void xenvif_unmap_frontend_rings(struct xenvif_queue *queue) |
1828 | { | 1831 | { |
1829 | if (vif->tx.sring) | 1832 | if (queue->tx.sring) |
1830 | xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif), | 1833 | xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), |
1831 | vif->tx.sring); | 1834 | queue->tx.sring); |
1832 | if (vif->rx.sring) | 1835 | if (queue->rx.sring) |
1833 | xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif), | 1836 | xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), |
1834 | vif->rx.sring); | 1837 | queue->rx.sring); |
1835 | } | 1838 | } |
1836 | 1839 | ||
1837 | int xenvif_map_frontend_rings(struct xenvif *vif, | 1840 | int xenvif_map_frontend_rings(struct xenvif_queue *queue, |
1838 | grant_ref_t tx_ring_ref, | 1841 | grant_ref_t tx_ring_ref, |
1839 | grant_ref_t rx_ring_ref) | 1842 | grant_ref_t rx_ring_ref) |
1840 | { | 1843 | { |
@@ -1844,85 +1847,78 @@ int xenvif_map_frontend_rings(struct xenvif *vif, | |||
1844 | 1847 | ||
1845 | int err = -ENOMEM; | 1848 | int err = -ENOMEM; |
1846 | 1849 | ||
1847 | err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif), | 1850 | err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), |
1848 | tx_ring_ref, &addr); | 1851 | tx_ring_ref, &addr); |
1849 | if (err) | 1852 | if (err) |
1850 | goto err; | 1853 | goto err; |
1851 | 1854 | ||
1852 | txs = (struct xen_netif_tx_sring *)addr; | 1855 | txs = (struct xen_netif_tx_sring *)addr; |
1853 | BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE); | 1856 | BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE); |
1854 | 1857 | ||
1855 | err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif), | 1858 | err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), |
1856 | rx_ring_ref, &addr); | 1859 | rx_ring_ref, &addr); |
1857 | if (err) | 1860 | if (err) |
1858 | goto err; | 1861 | goto err; |
1859 | 1862 | ||
1860 | rxs = (struct xen_netif_rx_sring *)addr; | 1863 | rxs = (struct xen_netif_rx_sring *)addr; |
1861 | BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE); | 1864 | BACK_RING_INIT(&queue->rx, rxs, PAGE_SIZE); |
1862 | 1865 | ||
1863 | return 0; | 1866 | return 0; |
1864 | 1867 | ||
1865 | err: | 1868 | err: |
1866 | xenvif_unmap_frontend_rings(vif); | 1869 | xenvif_unmap_frontend_rings(queue); |
1867 | return err; | 1870 | return err; |
1868 | } | 1871 | } |
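xenvif_map_frontend_rings maps the two grant references in order and, on any failure, falls through to the same unmap helper, which tolerates partial setup. A tiny sketch of that acquire-in-order / unwind-once shape, with malloc standing in for the xenbus ring mapping and an illustrative return code.

#include <stdlib.h>

struct toy_rings { void *tx; void *rx; };

static void toy_unmap_rings(struct toy_rings *r)
{
	free(r->tx);  r->tx = NULL;
	free(r->rx);  r->rx = NULL;
}

static int toy_map_rings(struct toy_rings *r, size_t ring_bytes)
{
	r->tx = NULL;
	r->rx = NULL;

	r->tx = malloc(ring_bytes);
	if (!r->tx)
		goto err;
	r->rx = malloc(ring_bytes);
	if (!r->rx)
		goto err;
	return 0;

err:
	toy_unmap_rings(r);   /* safe on partial setup, like the error path above */
	return -1;            /* stands in for -ENOMEM */
}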
1869 | 1872 | ||
1870 | void xenvif_stop_queue(struct xenvif *vif) | 1873 | static void xenvif_start_queue(struct xenvif_queue *queue) |
1871 | { | ||
1872 | if (!vif->can_queue) | ||
1873 | return; | ||
1874 | |||
1875 | netif_stop_queue(vif->dev); | ||
1876 | } | ||
1877 | |||
1878 | static void xenvif_start_queue(struct xenvif *vif) | ||
1879 | { | 1874 | { |
1880 | if (xenvif_schedulable(vif)) | 1875 | if (xenvif_schedulable(queue->vif)) |
1881 | netif_wake_queue(vif->dev); | 1876 | xenvif_wake_queue(queue); |
1882 | } | 1877 | } |
1883 | 1878 | ||
1884 | int xenvif_kthread_guest_rx(void *data) | 1879 | int xenvif_kthread_guest_rx(void *data) |
1885 | { | 1880 | { |
1886 | struct xenvif *vif = data; | 1881 | struct xenvif_queue *queue = data; |
1887 | struct sk_buff *skb; | 1882 | struct sk_buff *skb; |
1888 | 1883 | ||
1889 | while (!kthread_should_stop()) { | 1884 | while (!kthread_should_stop()) { |
1890 | wait_event_interruptible(vif->wq, | 1885 | wait_event_interruptible(queue->wq, |
1891 | rx_work_todo(vif) || | 1886 | rx_work_todo(queue) || |
1892 | vif->disabled || | 1887 | queue->vif->disabled || |
1893 | kthread_should_stop()); | 1888 | kthread_should_stop()); |
1894 | 1889 | ||
1895 | /* This frontend is found to be rogue, disable it in | 1890 | /* This frontend is found to be rogue, disable it in |
1896 | * kthread context. Currently this is only set when | 1891 | * kthread context. Currently this is only set when |
1897 | * netback finds out frontend sends malformed packet, | 1892 | * netback finds out frontend sends malformed packet, |
1898 | * but we cannot disable the interface in softirq | 1893 | * but we cannot disable the interface in softirq |
1899 | * context so we defer it here. | 1894 | * context so we defer it here, if this thread is |
| | 1895 | * associated with queue 0. |
1900 | */ | 1896 | */ |
1901 | if (unlikely(vif->disabled && netif_carrier_ok(vif->dev))) | 1897 | if (unlikely(queue->vif->disabled && netif_carrier_ok(queue->vif->dev) && queue->id == 0)) |
1902 | xenvif_carrier_off(vif); | 1898 | xenvif_carrier_off(queue->vif); |
1903 | 1899 | ||
1904 | if (kthread_should_stop()) | 1900 | if (kthread_should_stop()) |
1905 | break; | 1901 | break; |
1906 | 1902 | ||
1907 | if (vif->rx_queue_purge) { | 1903 | if (queue->rx_queue_purge) { |
1908 | skb_queue_purge(&vif->rx_queue); | 1904 | skb_queue_purge(&queue->rx_queue); |
1909 | vif->rx_queue_purge = false; | 1905 | queue->rx_queue_purge = false; |
1910 | } | 1906 | } |
1911 | 1907 | ||
1912 | if (!skb_queue_empty(&vif->rx_queue)) | 1908 | if (!skb_queue_empty(&queue->rx_queue)) |
1913 | xenvif_rx_action(vif); | 1909 | xenvif_rx_action(queue); |
1914 | 1910 | ||
1915 | if (skb_queue_empty(&vif->rx_queue) && | 1911 | if (skb_queue_empty(&queue->rx_queue) && |
1916 | netif_queue_stopped(vif->dev)) { | 1912 | xenvif_queue_stopped(queue)) { |
1917 | del_timer_sync(&vif->wake_queue); | 1913 | del_timer_sync(&queue->wake_queue); |
1918 | xenvif_start_queue(vif); | 1914 | xenvif_start_queue(queue); |
1919 | } | 1915 | } |
1920 | 1916 | ||
1921 | cond_resched(); | 1917 | cond_resched(); |
1922 | } | 1918 | } |
1923 | 1919 | ||
1924 | /* Bin any remaining skbs */ | 1920 | /* Bin any remaining skbs */ |
1925 | while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) | 1921 | while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) |
1926 | dev_kfree_skb(skb); | 1922 | dev_kfree_skb(skb); |
1927 | 1923 | ||
1928 | return 0; | 1924 | return 0; |
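The guest-RX kthread follows the usual kernel worker shape: sleep until there is work or a stop request, bail out promptly on stop, do one batch of work, then yield. A userspace sketch of just that control flow, with pthreads standing in for wait_event_interruptible() and kthread_should_stop(); the names are illustrative only.

#include <pthread.h>
#include <stdbool.h>

struct toy_worker {
	pthread_mutex_t lock;
	pthread_cond_t  wake;
	bool            stop;
	bool            work_pending;
};

static void *toy_worker_loop(void *arg)
{
	struct toy_worker *w = arg;

	for (;;) {
		pthread_mutex_lock(&w->lock);
		while (!w->work_pending && !w->stop)
			pthread_cond_wait(&w->wake, &w->lock);
		if (w->stop) {
			pthread_mutex_unlock(&w->lock);
			break;                  /* like kthread_should_stop() */
		}
		w->work_pending = false;
		pthread_mutex_unlock(&w->lock);

		/* ... do one batch of work, e.g. drain the rx queue ... */
	}
	/* ... final cleanup, e.g. bin any remaining skbs ... */
	return NULL;
}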
@@ -1930,22 +1926,22 @@ int xenvif_kthread_guest_rx(void *data) | |||
1930 | 1926 | ||
1931 | int xenvif_dealloc_kthread(void *data) | 1927 | int xenvif_dealloc_kthread(void *data) |
1932 | { | 1928 | { |
1933 | struct xenvif *vif = data; | 1929 | struct xenvif_queue *queue = data; |
1934 | 1930 | ||
1935 | while (!kthread_should_stop()) { | 1931 | while (!kthread_should_stop()) { |
1936 | wait_event_interruptible(vif->dealloc_wq, | 1932 | wait_event_interruptible(queue->dealloc_wq, |
1937 | tx_dealloc_work_todo(vif) || | 1933 | tx_dealloc_work_todo(queue) || |
1938 | kthread_should_stop()); | 1934 | kthread_should_stop()); |
1939 | if (kthread_should_stop()) | 1935 | if (kthread_should_stop()) |
1940 | break; | 1936 | break; |
1941 | 1937 | ||
1942 | xenvif_tx_dealloc_action(vif); | 1938 | xenvif_tx_dealloc_action(queue); |
1943 | cond_resched(); | 1939 | cond_resched(); |
1944 | } | 1940 | } |
1945 | 1941 | ||
1946 | /* Unmap anything remaining*/ | 1942 | /* Unmap anything remaining*/ |
1947 | if (tx_dealloc_work_todo(vif)) | 1943 | if (tx_dealloc_work_todo(queue)) |
1948 | xenvif_tx_dealloc_action(vif); | 1944 | xenvif_tx_dealloc_action(queue); |
1949 | 1945 | ||
1950 | return 0; | 1946 | return 0; |
1951 | } | 1947 | } |