author      Wei Liu <wei.liu2@citrix.com>            2013-08-26 07:59:39 -0400
committer   David S. Miller <davem@davemloft.net>    2013-08-29 01:18:04 -0400
commit      7376419a4697657b2e0ab904a592aacc2e485bf1 (patch)
tree        afd53204a1c0374d5b85490d7edee7bee1ed72c9
parent      b3f980bd827e6e81a050c518d60ed7811a83061d (diff)
xen-netback: rename functions
As we move to the 1:1 model and melt xen_netbk and xenvif together, it is
better to use a single prefix for all functions in xen-netback.
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
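The rename is mechanical: every helper keeps its signature and only trades the
xen_netbk_/netbk_ prefix for xenvif_, matching the now 1:1 pairing of a vif
with its backend state. As a rough illustration (not part of the patch itself),
using two prototypes that appear in the diff below:

    /* before */
    int xen_netbk_tx_action(struct xenvif *vif, int budget);
    void xen_netbk_rx_action(struct xenvif *vif);

    /* after */
    int xenvif_tx_action(struct xenvif *vif, int budget);
    void xenvif_rx_action(struct xenvif *vif);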
-rw-r--r--   drivers/net/xen-netback/common.h      24
-rw-r--r--   drivers/net/xen-netback/interface.c   20
-rw-r--r--   drivers/net/xen-netback/netback.c    223
3 files changed, 134 insertions, 133 deletions
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 9c1f15872e17..a1977430ddfb 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -190,21 +190,21 @@ void xenvif_xenbus_fini(void);
 
 int xenvif_schedulable(struct xenvif *vif);
 
-int xen_netbk_rx_ring_full(struct xenvif *vif);
+int xenvif_rx_ring_full(struct xenvif *vif);
 
-int xen_netbk_must_stop_queue(struct xenvif *vif);
+int xenvif_must_stop_queue(struct xenvif *vif);
 
 /* (Un)Map communication rings. */
-void xen_netbk_unmap_frontend_rings(struct xenvif *vif);
-int xen_netbk_map_frontend_rings(struct xenvif *vif,
-				 grant_ref_t tx_ring_ref,
-				 grant_ref_t rx_ring_ref);
+void xenvif_unmap_frontend_rings(struct xenvif *vif);
+int xenvif_map_frontend_rings(struct xenvif *vif,
+			      grant_ref_t tx_ring_ref,
+			      grant_ref_t rx_ring_ref);
 
 /* Check for SKBs from frontend and schedule backend processing */
-void xen_netbk_check_rx_xenvif(struct xenvif *vif);
+void xenvif_check_rx_xenvif(struct xenvif *vif);
 
 /* Queue an SKB for transmission to the frontend */
-void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
+void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
 /* Notify xenvif that ring now has space to send an skb to the frontend */
 void xenvif_notify_tx_completion(struct xenvif *vif);
 
@@ -212,12 +212,12 @@ void xenvif_notify_tx_completion(struct xenvif *vif);
 void xenvif_carrier_off(struct xenvif *vif);
 
 /* Returns number of ring slots required to send an skb to the frontend */
-unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
+unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
 
-int xen_netbk_tx_action(struct xenvif *vif, int budget);
-void xen_netbk_rx_action(struct xenvif *vif);
+int xenvif_tx_action(struct xenvif *vif, int budget);
+void xenvif_rx_action(struct xenvif *vif);
 
-int xen_netbk_kthread(void *data);
+int xenvif_kthread(void *data);
 
 extern bool separate_tx_rx_irq;
 
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 44d6b707c77e..625c6f49cfba 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -48,7 +48,7 @@ int xenvif_schedulable(struct xenvif *vif)
 
 static int xenvif_rx_schedulable(struct xenvif *vif)
 {
-	return xenvif_schedulable(vif) && !xen_netbk_rx_ring_full(vif);
+	return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif);
 }
 
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
@@ -66,7 +66,7 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
 	struct xenvif *vif = container_of(napi, struct xenvif, napi);
 	int work_done;
 
-	work_done = xen_netbk_tx_action(vif, budget);
+	work_done = xenvif_tx_action(vif, budget);
 
 	if (work_done < budget) {
 		int more_to_do = 0;
@@ -133,12 +133,12 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto drop;
 
 	/* Reserve ring slots for the worst-case number of fragments. */
-	vif->rx_req_cons_peek += xen_netbk_count_skb_slots(vif, skb);
+	vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb);
 
-	if (vif->can_queue && xen_netbk_must_stop_queue(vif))
+	if (vif->can_queue && xenvif_must_stop_queue(vif))
 		netif_stop_queue(dev);
 
-	xen_netbk_queue_tx_skb(vif, skb);
+	xenvif_queue_tx_skb(vif, skb);
 
 	return NETDEV_TX_OK;
 
@@ -166,7 +166,7 @@ static void xenvif_up(struct xenvif *vif)
 	enable_irq(vif->tx_irq);
 	if (vif->tx_irq != vif->rx_irq)
 		enable_irq(vif->rx_irq);
-	xen_netbk_check_rx_xenvif(vif);
+	xenvif_check_rx_xenvif(vif);
 }
 
 static void xenvif_down(struct xenvif *vif)
@@ -368,7 +368,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 
 	__module_get(THIS_MODULE);
 
-	err = xen_netbk_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
+	err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
 	if (err < 0)
 		goto err;
 
@@ -405,7 +405,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 	}
 
 	init_waitqueue_head(&vif->wq);
-	vif->task = kthread_create(xen_netbk_kthread,
+	vif->task = kthread_create(xenvif_kthread,
 				   (void *)vif, vif->dev->name);
 	if (IS_ERR(vif->task)) {
 		pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
@@ -433,7 +433,7 @@ err_tx_unbind:
 	unbind_from_irqhandler(vif->tx_irq, vif);
 	vif->tx_irq = 0;
 err_unmap:
-	xen_netbk_unmap_frontend_rings(vif);
+	xenvif_unmap_frontend_rings(vif);
 err:
 	module_put(THIS_MODULE);
 	return err;
@@ -481,7 +481,7 @@ void xenvif_disconnect(struct xenvif *vif)
 
 	unregister_netdev(vif->dev);
 
-	xen_netbk_unmap_frontend_rings(vif);
+	xenvif_unmap_frontend_rings(vif);
 
 	free_netdev(vif->dev);
 
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 44ccc674c02f..956130c70036 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -80,8 +80,9 @@ static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
 	return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
 }
 
-static void xen_netbk_idx_release(struct xenvif *vif, u16 pending_idx,
-				  u8 status);
+static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
+			       u8 status);
+
 static void make_tx_response(struct xenvif *vif,
 			     struct xen_netif_tx_request *txp,
 			     s8 st);
@@ -150,7 +151,7 @@ static int max_required_rx_slots(struct xenvif *vif)
 	return max;
 }
 
-int xen_netbk_rx_ring_full(struct xenvif *vif)
+int xenvif_rx_ring_full(struct xenvif *vif)
 {
 	RING_IDX peek = vif->rx_req_cons_peek;
 	RING_IDX needed = max_required_rx_slots(vif);
@@ -159,16 +160,16 @@ int xen_netbk_rx_ring_full(struct xenvif *vif)
 	       ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
 }
 
-int xen_netbk_must_stop_queue(struct xenvif *vif)
+int xenvif_must_stop_queue(struct xenvif *vif)
 {
-	if (!xen_netbk_rx_ring_full(vif))
+	if (!xenvif_rx_ring_full(vif))
 		return 0;
 
 	vif->rx.sring->req_event = vif->rx_req_cons_peek +
 		max_required_rx_slots(vif);
 	mb(); /* request notification /then/ check the queue */
 
-	return xen_netbk_rx_ring_full(vif);
+	return xenvif_rx_ring_full(vif);
 }
 
 /*
@@ -214,9 +215,9 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
 /*
  * Figure out how many ring slots we're going to need to send @skb to
  * the guest. This function is essentially a dry run of
- * netbk_gop_frag_copy.
+ * xenvif_gop_frag_copy.
  */
-unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
+unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
 {
 	unsigned int count;
 	int i, copy_off;
@@ -296,10 +297,10 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
  * Set up the grant operations for this fragment. If it's a flipping
  * interface, we also set up the unmap request from here.
  */
-static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
-				struct netrx_pending_operations *npo,
-				struct page *page, unsigned long size,
-				unsigned long offset, int *head)
+static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
+				 struct netrx_pending_operations *npo,
+				 struct page *page, unsigned long size,
+				 unsigned long offset, int *head)
 {
 	struct gnttab_copy *copy_gop;
 	struct xenvif_rx_meta *meta;
@@ -382,8 +383,8 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
  * zero GSO descriptors (for non-GSO packets) or one descriptor (for
  * frontend-side LRO).
  */
-static int netbk_gop_skb(struct sk_buff *skb,
-			 struct netrx_pending_operations *npo)
+static int xenvif_gop_skb(struct sk_buff *skb,
+			  struct netrx_pending_operations *npo)
 {
 	struct xenvif *vif = netdev_priv(skb->dev);
 	int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -426,30 +427,30 @@ static int netbk_gop_skb(struct sk_buff *skb,
 		if (data + len > skb_tail_pointer(skb))
 			len = skb_tail_pointer(skb) - data;
 
-		netbk_gop_frag_copy(vif, skb, npo,
-				    virt_to_page(data), len, offset, &head);
+		xenvif_gop_frag_copy(vif, skb, npo,
+				     virt_to_page(data), len, offset, &head);
 		data += len;
 	}
 
 	for (i = 0; i < nr_frags; i++) {
-		netbk_gop_frag_copy(vif, skb, npo,
-				    skb_frag_page(&skb_shinfo(skb)->frags[i]),
-				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
-				    skb_shinfo(skb)->frags[i].page_offset,
-				    &head);
+		xenvif_gop_frag_copy(vif, skb, npo,
+				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
+				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
+				     skb_shinfo(skb)->frags[i].page_offset,
+				     &head);
 	}
 
 	return npo->meta_prod - old_meta_prod;
 }
 
 /*
- * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
+ * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
  * used to set up the operations on the top of
  * netrx_pending_operations, which have since been done. Check that
  * they didn't give any errors and advance over them.
  */
-static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
-			   struct netrx_pending_operations *npo)
+static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
+			    struct netrx_pending_operations *npo)
 {
 	struct gnttab_copy *copy_op;
 	int status = XEN_NETIF_RSP_OKAY;
@@ -468,9 +469,9 @@ static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
 	return status;
 }
 
-static void netbk_add_frag_responses(struct xenvif *vif, int status,
-				     struct xenvif_rx_meta *meta,
-				     int nr_meta_slots)
+static void xenvif_add_frag_responses(struct xenvif *vif, int status,
+				      struct xenvif_rx_meta *meta,
+				      int nr_meta_slots)
 {
 	int i;
 	unsigned long offset;
@@ -498,12 +499,12 @@ struct skb_cb_overlay {
 	int meta_slots_used;
 };
 
-static void xen_netbk_kick_thread(struct xenvif *vif)
+static void xenvif_kick_thread(struct xenvif *vif)
 {
 	wake_up(&vif->wq);
 }
 
-void xen_netbk_rx_action(struct xenvif *vif)
+void xenvif_rx_action(struct xenvif *vif)
 {
 	s8 status;
 	u16 flags;
@@ -532,7 +533,7 @@ void xen_netbk_rx_action(struct xenvif *vif)
 		nr_frags = skb_shinfo(skb)->nr_frags;
 
 		sco = (struct skb_cb_overlay *)skb->cb;
-		sco->meta_slots_used = netbk_gop_skb(skb, &npo);
+		sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
 
 		count += nr_frags + 1;
 
@@ -575,7 +576,7 @@ void xen_netbk_rx_action(struct xenvif *vif)
 		vif->dev->stats.tx_bytes += skb->len;
 		vif->dev->stats.tx_packets++;
 
-		status = netbk_check_gop(vif, sco->meta_slots_used, &npo);
+		status = xenvif_check_gop(vif, sco->meta_slots_used, &npo);
 
 		if (sco->meta_slots_used == 1)
 			flags = 0;
@@ -611,9 +612,9 @@ void xen_netbk_rx_action(struct xenvif *vif)
 			gso->flags = 0;
 		}
 
-		netbk_add_frag_responses(vif, status,
-					 vif->meta + npo.meta_cons + 1,
-					 sco->meta_slots_used);
+		xenvif_add_frag_responses(vif, status,
+					  vif->meta + npo.meta_cons + 1,
+					  sco->meta_slots_used);
 
 		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
 
@@ -631,17 +632,17 @@ void xen_netbk_rx_action(struct xenvif *vif)
 
 	/* More work to do? */
 	if (!skb_queue_empty(&vif->rx_queue))
-		xen_netbk_kick_thread(vif);
+		xenvif_kick_thread(vif);
 }
 
-void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
+void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
 {
 	skb_queue_tail(&vif->rx_queue, skb);
 
-	xen_netbk_kick_thread(vif);
+	xenvif_kick_thread(vif);
 }
 
-void xen_netbk_check_rx_xenvif(struct xenvif *vif)
+void xenvif_check_rx_xenvif(struct xenvif *vif)
 {
 	int more_to_do;
 
@@ -675,11 +676,11 @@ static void tx_credit_callback(unsigned long data)
 {
 	struct xenvif *vif = (struct xenvif *)data;
 	tx_add_credit(vif);
-	xen_netbk_check_rx_xenvif(vif);
+	xenvif_check_rx_xenvif(vif);
 }
 
-static void netbk_tx_err(struct xenvif *vif,
-			 struct xen_netif_tx_request *txp, RING_IDX end)
+static void xenvif_tx_err(struct xenvif *vif,
+			  struct xen_netif_tx_request *txp, RING_IDX end)
 {
 	RING_IDX cons = vif->tx.req_cons;
 
@@ -692,16 +693,16 @@ static void netbk_tx_err(struct xenvif *vif,
 	vif->tx.req_cons = cons;
 }
 
-static void netbk_fatal_tx_err(struct xenvif *vif)
+static void xenvif_fatal_tx_err(struct xenvif *vif)
 {
 	netdev_err(vif->dev, "fatal error; disabling device\n");
 	xenvif_carrier_off(vif);
 }
 
-static int netbk_count_requests(struct xenvif *vif,
-				struct xen_netif_tx_request *first,
-				struct xen_netif_tx_request *txp,
-				int work_to_do)
+static int xenvif_count_requests(struct xenvif *vif,
+				 struct xen_netif_tx_request *first,
+				 struct xen_netif_tx_request *txp,
+				 int work_to_do)
 {
 	RING_IDX cons = vif->tx.req_cons;
 	int slots = 0;
@@ -718,7 +719,7 @@ static int netbk_count_requests(struct xenvif *vif,
 			netdev_err(vif->dev,
 				   "Asked for %d slots but exceeds this limit\n",
 				   work_to_do);
-			netbk_fatal_tx_err(vif);
+			xenvif_fatal_tx_err(vif);
 			return -ENODATA;
 		}
 
@@ -729,7 +730,7 @@ static int netbk_count_requests(struct xenvif *vif,
 			netdev_err(vif->dev,
 				   "Malicious frontend using %d slots, threshold %u\n",
 				   slots, fatal_skb_slots);
-			netbk_fatal_tx_err(vif);
+			xenvif_fatal_tx_err(vif);
 			return -E2BIG;
 		}
 
@@ -777,7 +778,7 @@ static int netbk_count_requests(struct xenvif *vif,
 		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
 			netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
 				   txp->offset, txp->size);
-			netbk_fatal_tx_err(vif);
+			xenvif_fatal_tx_err(vif);
 			return -EINVAL;
 		}
 
@@ -789,15 +790,15 @@ static int netbk_count_requests(struct xenvif *vif,
 	} while (more_data);
 
 	if (drop_err) {
-		netbk_tx_err(vif, first, cons + slots);
+		xenvif_tx_err(vif, first, cons + slots);
 		return drop_err;
 	}
 
 	return slots;
 }
 
-static struct page *xen_netbk_alloc_page(struct xenvif *vif,
-					 u16 pending_idx)
+static struct page *xenvif_alloc_page(struct xenvif *vif,
+				      u16 pending_idx)
 {
 	struct page *page;
 
@@ -809,10 +810,10 @@ static struct page *xen_netbk_alloc_page(struct xenvif *vif,
 	return page;
 }
 
-static struct gnttab_copy *xen_netbk_get_requests(struct xenvif *vif,
-						  struct sk_buff *skb,
-						  struct xen_netif_tx_request *txp,
-						  struct gnttab_copy *gop)
+static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
+					       struct sk_buff *skb,
+					       struct xen_netif_tx_request *txp,
+					       struct gnttab_copy *gop)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	skb_frag_t *frags = shinfo->frags;
@@ -835,7 +836,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xenvif *vif,
 
 	/* Coalesce tx requests, at this point the packet passed in
 	 * should be <= 64K. Any packets larger than 64K have been
-	 * handled in netbk_count_requests().
+	 * handled in xenvif_count_requests().
 	 */
 	for (shinfo->nr_frags = slot = start; slot < nr_slots;
 	     shinfo->nr_frags++) {
@@ -918,20 +919,20 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xenvif *vif,
 err:
 	/* Unwind, freeing all pages and sending error responses. */
 	while (shinfo->nr_frags-- > start) {
-		xen_netbk_idx_release(vif,
-				      frag_get_pending_idx(&frags[shinfo->nr_frags]),
-				      XEN_NETIF_RSP_ERROR);
+		xenvif_idx_release(vif,
+				   frag_get_pending_idx(&frags[shinfo->nr_frags]),
+				   XEN_NETIF_RSP_ERROR);
 	}
 	/* The head too, if necessary. */
 	if (start)
-		xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
 
 	return NULL;
 }
 
-static int xen_netbk_tx_check_gop(struct xenvif *vif,
-				  struct sk_buff *skb,
-				  struct gnttab_copy **gopp)
+static int xenvif_tx_check_gop(struct xenvif *vif,
+			       struct sk_buff *skb,
+			       struct gnttab_copy **gopp)
 {
 	struct gnttab_copy *gop = *gopp;
 	u16 pending_idx = *((u16 *)skb->data);
@@ -944,7 +945,7 @@ static int xen_netbk_tx_check_gop(struct xenvif *vif,
 	/* Check status of header. */
 	err = gop->status;
 	if (unlikely(err))
-		xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
 
 	/* Skip first skb fragment if it is on same page as header fragment. */
 	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
@@ -968,13 +969,13 @@ static int xen_netbk_tx_check_gop(struct xenvif *vif,
 		if (likely(!newerr)) {
 			/* Had a previous error? Invalidate this fragment. */
 			if (unlikely(err))
-				xen_netbk_idx_release(vif, pending_idx,
-						      XEN_NETIF_RSP_OKAY);
+				xenvif_idx_release(vif, pending_idx,
+						   XEN_NETIF_RSP_OKAY);
 			continue;
 		}
 
 		/* Error on this fragment: respond to client with an error. */
-		xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
 
 		/* Not the first error? Preceding frags already invalidated. */
 		if (err)
@@ -982,11 +983,11 @@ static int xen_netbk_tx_check_gop(struct xenvif *vif,
 
 		/* First error: invalidate header and preceding fragments. */
 		pending_idx = *((u16 *)skb->data);
-		xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
+		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
 		for (j = start; j < i; j++) {
 			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
-			xen_netbk_idx_release(vif, pending_idx,
-					      XEN_NETIF_RSP_OKAY);
+			xenvif_idx_release(vif, pending_idx,
+					   XEN_NETIF_RSP_OKAY);
 		}
 
 		/* Remember the error: invalidate all subsequent fragments. */
@@ -997,7 +998,7 @@ static int xen_netbk_tx_check_gop(struct xenvif *vif,
 	return err;
 }
 
-static void xen_netbk_fill_frags(struct xenvif *vif, struct sk_buff *skb)
+static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	int nr_frags = shinfo->nr_frags;
@@ -1018,13 +1019,13 @@ static void xen_netbk_fill_frags(struct xenvif *vif, struct sk_buff *skb)
 		skb->data_len += txp->size;
 		skb->truesize += txp->size;
 
-		/* Take an extra reference to offset xen_netbk_idx_release */
+		/* Take an extra reference to offset xenvif_idx_release */
 		get_page(vif->mmap_pages[pending_idx]);
-		xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
+		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
 	}
 }
 
-static int xen_netbk_get_extras(struct xenvif *vif,
-				struct xen_netif_extra_info *extras,
-				int work_to_do)
+static int xenvif_get_extras(struct xenvif *vif,
+			     struct xen_netif_extra_info *extras,
+			     int work_to_do)
 {
@@ -1034,7 +1035,7 @@ static int xen_netbk_get_extras(struct xenvif *vif,
 	do {
 		if (unlikely(work_to_do-- <= 0)) {
 			netdev_err(vif->dev, "Missing extra info\n");
-			netbk_fatal_tx_err(vif);
+			xenvif_fatal_tx_err(vif);
 			return -EBADR;
 		}
 
@@ -1045,7 +1046,7 @@ static int xen_netbk_get_extras(struct xenvif *vif,
 			vif->tx.req_cons = ++cons;
 			netdev_err(vif->dev,
 				   "Invalid extra type: %d\n", extra.type);
-			netbk_fatal_tx_err(vif);
+			xenvif_fatal_tx_err(vif);
 			return -EINVAL;
 		}
 
@@ -1056,20 +1057,20 @@ static int xen_netbk_get_extras(struct xenvif *vif,
 	return work_to_do;
 }
 
-static int netbk_set_skb_gso(struct xenvif *vif,
-			     struct sk_buff *skb,
-			     struct xen_netif_extra_info *gso)
+static int xenvif_set_skb_gso(struct xenvif *vif,
+			      struct sk_buff *skb,
+			      struct xen_netif_extra_info *gso)
 {
 	if (!gso->u.gso.size) {
 		netdev_err(vif->dev, "GSO size must not be zero.\n");
-		netbk_fatal_tx_err(vif);
+		xenvif_fatal_tx_err(vif);
 		return -EINVAL;
 	}
 
 	/* Currently only TCPv4 S.O. is supported. */
 	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
 		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
-		netbk_fatal_tx_err(vif);
+		xenvif_fatal_tx_err(vif);
 		return -EINVAL;
 	}
 
@@ -1180,7 +1181,7 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
 	return false;
 }
 
-static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
+static unsigned xenvif_tx_build_gops(struct xenvif *vif)
 {
 	struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
 	struct sk_buff *skb;
@@ -1205,7 +1206,7 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
 				   "req_prod %d, req_cons %d, size %ld\n",
 				   vif->tx.sring->req_prod, vif->tx.req_cons,
 				   XEN_NETIF_TX_RING_SIZE);
-			netbk_fatal_tx_err(vif);
+			xenvif_fatal_tx_err(vif);
 			continue;
 		}
 
@@ -1229,14 +1230,14 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
 
 		memset(extras, 0, sizeof(extras));
 		if (txreq.flags & XEN_NETTXF_extra_info) {
-			work_to_do = xen_netbk_get_extras(vif, extras,
-							  work_to_do);
+			work_to_do = xenvif_get_extras(vif, extras,
+						       work_to_do);
 			idx = vif->tx.req_cons;
 			if (unlikely(work_to_do < 0))
 				break;
 		}
 
-		ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
+		ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do);
 		if (unlikely(ret < 0))
 			break;
 
@@ -1245,7 +1246,7 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
 		if (unlikely(txreq.size < ETH_HLEN)) {
 			netdev_dbg(vif->dev,
 				   "Bad packet size: %d\n", txreq.size);
-			netbk_tx_err(vif, &txreq, idx);
+			xenvif_tx_err(vif, &txreq, idx);
 			break;
 		}
 
@@ -1255,7 +1256,7 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
 				   "txreq.offset: %x, size: %u, end: %lu\n",
 				   txreq.offset, txreq.size,
 				   (txreq.offset&~PAGE_MASK) + txreq.size);
-			netbk_fatal_tx_err(vif);
+			xenvif_fatal_tx_err(vif);
 			break;
 		}
 
@@ -1271,7 +1272,7 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
 		if (unlikely(skb == NULL)) {
 			netdev_dbg(vif->dev,
 				   "Can't allocate a skb in start_xmit.\n");
-			netbk_tx_err(vif, &txreq, idx);
+			xenvif_tx_err(vif, &txreq, idx);
 			break;
 		}
 
@@ -1282,18 +1283,18 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
 			struct xen_netif_extra_info *gso;
 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
 
-			if (netbk_set_skb_gso(vif, skb, gso)) {
-				/* Failure in netbk_set_skb_gso is fatal. */
+			if (xenvif_set_skb_gso(vif, skb, gso)) {
+				/* Failure in xenvif_set_skb_gso is fatal. */
 				kfree_skb(skb);
 				break;
 			}
 		}
 
 		/* XXX could copy straight to head */
-		page = xen_netbk_alloc_page(vif, pending_idx);
+		page = xenvif_alloc_page(vif, pending_idx);
 		if (!page) {
 			kfree_skb(skb);
-			netbk_tx_err(vif, &txreq, idx);
+			xenvif_tx_err(vif, &txreq, idx);
 			break;
 		}
 
@@ -1329,10 +1330,10 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
 
 		vif->pending_cons++;
 
-		request_gop = xen_netbk_get_requests(vif, skb, txfrags, gop);
+		request_gop = xenvif_get_requests(vif, skb, txfrags, gop);
 		if (request_gop == NULL) {
 			kfree_skb(skb);
-			netbk_tx_err(vif, &txreq, idx);
+			xenvif_tx_err(vif, &txreq, idx);
 			break;
 		}
 		gop = request_gop;
@@ -1349,7 +1350,7 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
 }
 
 
-static int xen_netbk_tx_submit(struct xenvif *vif, int budget)
+static int xenvif_tx_submit(struct xenvif *vif, int budget)
 {
 	struct gnttab_copy *gop = vif->tx_copy_ops;
 	struct sk_buff *skb;
@@ -1365,7 +1366,7 @@ static int xen_netbk_tx_submit(struct xenvif *vif, int budget)
 		txp = &vif->pending_tx_info[pending_idx].req;
 
 		/* Check the remap error code. */
-		if (unlikely(xen_netbk_tx_check_gop(vif, skb, &gop))) {
+		if (unlikely(xenvif_tx_check_gop(vif, skb, &gop))) {
 			netdev_dbg(vif->dev, "netback grant failed.\n");
 			skb_shinfo(skb)->nr_frags = 0;
 			kfree_skb(skb);
@@ -1382,8 +1383,8 @@ static int xen_netbk_tx_submit(struct xenvif *vif, int budget)
 			txp->size -= data_len;
 		} else {
 			/* Schedule a response immediately. */
-			xen_netbk_idx_release(vif, pending_idx,
-					      XEN_NETIF_RSP_OKAY);
+			xenvif_idx_release(vif, pending_idx,
+					   XEN_NETIF_RSP_OKAY);
 		}
 
 		if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1391,7 +1392,7 @@ static int xen_netbk_tx_submit(struct xenvif *vif, int budget)
 		else if (txp->flags & XEN_NETTXF_data_validated)
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-		xen_netbk_fill_frags(vif, skb);
+		xenvif_fill_frags(vif, skb);
 
 		/*
 		 * If the initial fragment was < PKT_PROT_LEN then
@@ -1428,7 +1429,7 @@ static int xen_netbk_tx_submit(struct xenvif *vif, int budget)
 }
 
 /* Called after netfront has transmitted */
-int xen_netbk_tx_action(struct xenvif *vif, int budget)
+int xenvif_tx_action(struct xenvif *vif, int budget)
 {
 	unsigned nr_gops;
 	int work_done;
@@ -1436,20 +1437,20 @@ int xen_netbk_tx_action(struct xenvif *vif, int budget)
 	if (unlikely(!tx_work_todo(vif)))
 		return 0;
 
-	nr_gops = xen_netbk_tx_build_gops(vif);
+	nr_gops = xenvif_tx_build_gops(vif);
 
 	if (nr_gops == 0)
 		return 0;
 
 	gnttab_batch_copy(vif->tx_copy_ops, nr_gops);
 
-	work_done = xen_netbk_tx_submit(vif, nr_gops);
+	work_done = xenvif_tx_submit(vif, nr_gops);
 
 	return work_done;
 }
 
-static void xen_netbk_idx_release(struct xenvif *vif, u16 pending_idx,
-				  u8 status)
+static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
+			       u8 status)
 {
 	struct pending_tx_info *pending_tx_info;
 	pending_ring_idx_t head;
@@ -1554,7 +1555,7 @@ static inline int tx_work_todo(struct xenvif *vif)
 	return 0;
 }
 
-void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
+void xenvif_unmap_frontend_rings(struct xenvif *vif)
 {
 	if (vif->tx.sring)
 		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
@@ -1564,9 +1565,9 @@ void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
 					vif->rx.sring);
 }
 
-int xen_netbk_map_frontend_rings(struct xenvif *vif,
-				 grant_ref_t tx_ring_ref,
-				 grant_ref_t rx_ring_ref)
+int xenvif_map_frontend_rings(struct xenvif *vif,
+			      grant_ref_t tx_ring_ref,
+			      grant_ref_t rx_ring_ref)
 {
 	void *addr;
 	struct xen_netif_tx_sring *txs;
@@ -1595,11 +1596,11 @@ int xen_netbk_map_frontend_rings(struct xenvif *vif,
 	return 0;
 
 err:
-	xen_netbk_unmap_frontend_rings(vif);
+	xenvif_unmap_frontend_rings(vif);
 	return err;
 }
 
-int xen_netbk_kthread(void *data)
+int xenvif_kthread(void *data)
 {
 	struct xenvif *vif = data;
 
@@ -1611,7 +1612,7 @@ int xen_netbk_kthread(void *data)
 			break;
 
 		if (rx_work_todo(vif))
-			xen_netbk_rx_action(vif);
+			xenvif_rx_action(vif);
 
 		cond_resched();
 	}