author     Paul Durrant <Paul.Durrant@citrix.com>    2016-05-13 04:37:26 -0400
committer  David S. Miller <davem@davemloft.net>     2016-05-16 13:35:56 -0400
commit     4e15ee2cb46fed730fe6f0195a86d44e5aeef129 (patch)
tree       852b76177b94ed322cc38f54bcce7cc052abea65 /drivers/net/xen-netback
parent     1ca467343240be738c8e61edd4b421ca9ebe2d77 (diff)
xen-netback: add control ring boilerplate
My recent patch to include/xen/interface/io/netif.h defines a new shared
ring (in addition to the rx and tx rings) for passing control messages
from a VM frontend driver to a backend driver.
This patch adds the necessary code to xen-netback to map this new shared
ring, should it be created by a frontend, but does not add implementations
for any of the defined protocol messages. These are added in a subsequent
patch for clarity.
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Acked-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
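For reference, the control ring reuses the generic Xen shared-ring machinery. The sketch below paraphrases the request/response layout introduced by the netif.h patch referred to above; it is for illustration only, so field widths and the exact set of XEN_NETIF_CTRL_* constants should be taken from include/xen/interface/io/netif.h rather than from this snippet:

#include <xen/interface/io/ring.h>	/* DEFINE_RING_TYPES() */

/* Sketch only: a control request carries an id (echoed back in the
 * matching response), a message type and type-specific arguments.
 */
struct xen_netif_ctrl_request {
	uint16_t id;
	uint16_t type;		/* XEN_NETIF_CTRL_TYPE_* */
	uint32_t data[3];	/* type-specific arguments */
};

struct xen_netif_ctrl_response {
	uint16_t id;		/* copied from the request */
	uint16_t type;		/* copied from the request */
	uint32_t status;	/* e.g. XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED */
	uint32_t data;		/* type-specific result */
};

/* Generates struct xen_netif_ctrl_sring plus the front/back ring types,
 * including the struct xen_netif_ctrl_back_ring used by this patch.
 */
DEFINE_RING_TYPES(xen_netif_ctrl,
		  struct xen_netif_ctrl_request,
		  struct xen_netif_ctrl_response);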
Diffstat (limited to 'drivers/net/xen-netback')
 drivers/net/xen-netback/common.h    |  28
 drivers/net/xen-netback/interface.c | 101
 drivers/net/xen-netback/netback.c   |  99
 drivers/net/xen-netback/xenbus.c    |  79
 4 files changed, 277 insertions(+), 30 deletions(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index f44b38846420..093a12abf71f 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -260,6 +260,11 @@ struct xenvif {
 	struct dentry *xenvif_dbg_root;
 #endif
 
+	struct xen_netif_ctrl_back_ring ctrl;
+	struct task_struct *ctrl_task;
+	wait_queue_head_t ctrl_wq;
+	unsigned int ctrl_irq;
+
 	/* Miscellaneous private stuff. */
 	struct net_device *dev;
 };
@@ -285,10 +290,15 @@ struct xenvif *xenvif_alloc(struct device *parent,
 int xenvif_init_queue(struct xenvif_queue *queue);
 void xenvif_deinit_queue(struct xenvif_queue *queue);
 
-int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
-		   unsigned long rx_ring_ref, unsigned int tx_evtchn,
-		   unsigned int rx_evtchn);
-void xenvif_disconnect(struct xenvif *vif);
+int xenvif_connect_data(struct xenvif_queue *queue,
+			unsigned long tx_ring_ref,
+			unsigned long rx_ring_ref,
+			unsigned int tx_evtchn,
+			unsigned int rx_evtchn);
+void xenvif_disconnect_data(struct xenvif *vif);
+int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
+			unsigned int evtchn);
+void xenvif_disconnect_ctrl(struct xenvif *vif);
 void xenvif_free(struct xenvif *vif);
 
 int xenvif_xenbus_init(void);
@@ -300,10 +310,10 @@ int xenvif_queue_stopped(struct xenvif_queue *queue);
 void xenvif_wake_queue(struct xenvif_queue *queue);
 
 /* (Un)Map communication rings. */
-void xenvif_unmap_frontend_rings(struct xenvif_queue *queue);
-int xenvif_map_frontend_rings(struct xenvif_queue *queue,
-			      grant_ref_t tx_ring_ref,
-			      grant_ref_t rx_ring_ref);
+void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue);
+int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
+				   grant_ref_t tx_ring_ref,
+				   grant_ref_t rx_ring_ref);
 
 /* Check for SKBs from frontend and schedule backend processing */
 void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);
@@ -318,6 +328,8 @@ void xenvif_kick_thread(struct xenvif_queue *queue);
 
 int xenvif_dealloc_kthread(void *data);
 
+int xenvif_ctrl_kthread(void *data);
+
 void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
 
 void xenvif_carrier_on(struct xenvif *vif);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index f5231a2dd2ac..78a10d2af101 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -128,6 +128,15 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+irqreturn_t xenvif_ctrl_interrupt(int irq, void *dev_id)
+{
+	struct xenvif *vif = dev_id;
+
+	wake_up(&vif->ctrl_wq);
+
+	return IRQ_HANDLED;
+}
+
 int xenvif_queue_stopped(struct xenvif_queue *queue)
 {
 	struct net_device *dev = queue->vif->dev;
@@ -527,9 +536,66 @@ void xenvif_carrier_on(struct xenvif *vif)
 	rtnl_unlock();
 }
 
-int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
-		   unsigned long rx_ring_ref, unsigned int tx_evtchn,
-		   unsigned int rx_evtchn)
+int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
+			unsigned int evtchn)
+{
+	struct net_device *dev = vif->dev;
+	void *addr;
+	struct xen_netif_ctrl_sring *shared;
+	struct task_struct *task;
+	int err = -ENOMEM;
+
+	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
+				     &ring_ref, 1, &addr);
+	if (err)
+		goto err;
+
+	shared = (struct xen_netif_ctrl_sring *)addr;
+	BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);
+
+	init_waitqueue_head(&vif->ctrl_wq);
+
+	err = bind_interdomain_evtchn_to_irqhandler(vif->domid, evtchn,
+						    xenvif_ctrl_interrupt,
+						    0, dev->name, vif);
+	if (err < 0)
+		goto err_unmap;
+
+	vif->ctrl_irq = err;
+
+	task = kthread_create(xenvif_ctrl_kthread, (void *)vif,
+			      "%s-control", dev->name);
+	if (IS_ERR(task)) {
+		pr_warn("Could not allocate kthread for %s\n", dev->name);
+		err = PTR_ERR(task);
+		goto err_deinit;
+	}
+
+	get_task_struct(task);
+	vif->ctrl_task = task;
+
+	wake_up_process(vif->ctrl_task);
+
+	return 0;
+
+err_deinit:
+	unbind_from_irqhandler(vif->ctrl_irq, vif);
+	vif->ctrl_irq = 0;
+
+err_unmap:
+	xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
+				vif->ctrl.sring);
+	vif->ctrl.sring = NULL;
+
+err:
+	return err;
+}
+
+int xenvif_connect_data(struct xenvif_queue *queue,
+			unsigned long tx_ring_ref,
+			unsigned long rx_ring_ref,
+			unsigned int tx_evtchn,
+			unsigned int rx_evtchn)
 {
 	struct task_struct *task;
 	int err = -ENOMEM;
@@ -538,7 +604,8 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
 	BUG_ON(queue->task);
 	BUG_ON(queue->dealloc_task);
 
-	err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
+	err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
+					     rx_ring_ref);
 	if (err < 0)
 		goto err;
 
@@ -614,7 +681,7 @@ err_tx_unbind:
 	unbind_from_irqhandler(queue->tx_irq, queue);
 	queue->tx_irq = 0;
 err_unmap:
-	xenvif_unmap_frontend_rings(queue);
+	xenvif_unmap_frontend_data_rings(queue);
 	netif_napi_del(&queue->napi);
 err:
 	module_put(THIS_MODULE);
@@ -634,7 +701,7 @@ void xenvif_carrier_off(struct xenvif *vif)
 	rtnl_unlock();
 }
 
-void xenvif_disconnect(struct xenvif *vif)
+void xenvif_disconnect_data(struct xenvif *vif)
 {
 	struct xenvif_queue *queue = NULL;
 	unsigned int num_queues = vif->num_queues;
@@ -668,12 +735,32 @@ void xenvif_disconnect(struct xenvif *vif)
 			queue->tx_irq = 0;
 		}
 
-		xenvif_unmap_frontend_rings(queue);
+		xenvif_unmap_frontend_data_rings(queue);
 	}
 
 	xenvif_mcast_addr_list_free(vif);
 }
 
+void xenvif_disconnect_ctrl(struct xenvif *vif)
+{
+	if (vif->ctrl_task) {
+		kthread_stop(vif->ctrl_task);
+		put_task_struct(vif->ctrl_task);
+		vif->ctrl_task = NULL;
+	}
+
+	if (vif->ctrl_irq) {
+		unbind_from_irqhandler(vif->ctrl_irq, vif);
+		vif->ctrl_irq = 0;
+	}
+
+	if (vif->ctrl.sring) {
+		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
+					vif->ctrl.sring);
+		vif->ctrl.sring = NULL;
+	}
+}
+
 /* Reverse the relevant parts of xenvif_init_queue().
  * Used for queue teardown from xenvif_free(), and on the
  * error handling paths in xenbus.c:connect().
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 4412a57ec862..ff22b6daa077 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1926,7 +1926,7 @@ static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
 	return queue->dealloc_cons != queue->dealloc_prod;
 }
 
-void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
+void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
 {
 	if (queue->tx.sring)
 		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
@@ -1936,9 +1936,9 @@ void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
 					queue->rx.sring);
 }
 
-int xenvif_map_frontend_rings(struct xenvif_queue *queue,
-			      grant_ref_t tx_ring_ref,
-			      grant_ref_t rx_ring_ref)
+int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
+				   grant_ref_t tx_ring_ref,
+				   grant_ref_t rx_ring_ref)
 {
 	void *addr;
 	struct xen_netif_tx_sring *txs;
@@ -1965,7 +1965,7 @@ int xenvif_map_frontend_rings(struct xenvif_queue *queue,
 	return 0;
 
 err:
-	xenvif_unmap_frontend_rings(queue);
+	xenvif_unmap_frontend_data_rings(queue);
 	return err;
 }
 
@@ -2164,6 +2164,95 @@ int xenvif_dealloc_kthread(void *data)
 	return 0;
 }
 
+static void make_ctrl_response(struct xenvif *vif,
+			       const struct xen_netif_ctrl_request *req,
+			       u32 status, u32 data)
+{
+	RING_IDX idx = vif->ctrl.rsp_prod_pvt;
+	struct xen_netif_ctrl_response rsp = {
+		.id = req->id,
+		.type = req->type,
+		.status = status,
+		.data = data,
+	};
+
+	*RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
+	vif->ctrl.rsp_prod_pvt = ++idx;
+}
+
+static void push_ctrl_response(struct xenvif *vif)
+{
+	int notify;
+
+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
+	if (notify)
+		notify_remote_via_irq(vif->ctrl_irq);
+}
+
+static void process_ctrl_request(struct xenvif *vif,
+				 const struct xen_netif_ctrl_request *req)
+{
+	make_ctrl_response(vif, req, XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED,
+			   0);
+	push_ctrl_response(vif);
+}
+
+static void xenvif_ctrl_action(struct xenvif *vif)
+{
+	for (;;) {
+		RING_IDX req_prod, req_cons;
+
+		req_prod = vif->ctrl.sring->req_prod;
+		req_cons = vif->ctrl.req_cons;
+
+		/* Make sure we can see requests before we process them. */
+		rmb();
+
+		if (req_cons == req_prod)
+			break;
+
+		while (req_cons != req_prod) {
+			struct xen_netif_ctrl_request req;
+
+			RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
+			req_cons++;
+
+			process_ctrl_request(vif, &req);
+		}
+
+		vif->ctrl.req_cons = req_cons;
+		vif->ctrl.sring->req_event = req_cons + 1;
+	}
+}
+
+static bool xenvif_ctrl_work_todo(struct xenvif *vif)
+{
+	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
+		return 1;
+
+	return 0;
+}
+
+int xenvif_ctrl_kthread(void *data)
+{
+	struct xenvif *vif = data;
+
+	for (;;) {
+		wait_event_interruptible(vif->ctrl_wq,
+					 xenvif_ctrl_work_todo(vif) ||
+					 kthread_should_stop());
+		if (kthread_should_stop())
+			break;
+
+		while (xenvif_ctrl_work_todo(vif))
+			xenvif_ctrl_action(vif);
+
+		cond_resched();
+	}
+
+	return 0;
+}
+
 static int __init netback_init(void)
 {
 	int rc = 0;
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index bd182cd55dda..6a31f2610c23 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -38,7 +38,8 @@ struct backend_info {
 	const char *hotplug_script;
 };
 
-static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
+static int connect_data_rings(struct backend_info *be,
+			      struct xenvif_queue *queue);
 static void connect(struct backend_info *be);
 static int read_xenbus_vif_flags(struct backend_info *be);
 static int backend_create_xenvif(struct backend_info *be);
@@ -367,6 +368,12 @@ static int netback_probe(struct xenbus_device *dev,
 	if (err)
 		pr_debug("Error writing multi-queue-max-queues\n");
 
+	err = xenbus_printf(XBT_NIL, dev->nodename,
+			    "feature-ctrl-ring",
+			    "%u", true);
+	if (err)
+		pr_debug("Error writing feature-ctrl-ring\n");
+
 	script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
 	if (IS_ERR(script)) {
 		err = PTR_ERR(script);
@@ -457,7 +464,8 @@ static void backend_disconnect(struct backend_info *be)
 #ifdef CONFIG_DEBUG_FS
 		xenvif_debugfs_delif(be->vif);
 #endif /* CONFIG_DEBUG_FS */
-		xenvif_disconnect(be->vif);
+		xenvif_disconnect_data(be->vif);
+		xenvif_disconnect_ctrl(be->vif);
 	}
 }
 
@@ -825,6 +833,48 @@ static void hotplug_status_changed(struct xenbus_watch *watch,
 	kfree(str);
 }
 
+static int connect_ctrl_ring(struct backend_info *be)
+{
+	struct xenbus_device *dev = be->dev;
+	struct xenvif *vif = be->vif;
+	unsigned int val;
+	grant_ref_t ring_ref;
+	unsigned int evtchn;
+	int err;
+
+	err = xenbus_gather(XBT_NIL, dev->otherend,
+			    "ctrl-ring-ref", "%u", &val, NULL);
+	if (err)
+		goto done; /* The frontend does not have a control ring */
+
+	ring_ref = val;
+
+	err = xenbus_gather(XBT_NIL, dev->otherend,
+			    "event-channel-ctrl", "%u", &val, NULL);
+	if (err) {
+		xenbus_dev_fatal(dev, err,
+				 "reading %s/event-channel-ctrl",
+				 dev->otherend);
+		goto fail;
+	}
+
+	evtchn = val;
+
+	err = xenvif_connect_ctrl(vif, ring_ref, evtchn);
+	if (err) {
+		xenbus_dev_fatal(dev, err,
+				 "mapping shared-frame %u port %u",
+				 ring_ref, evtchn);
+		goto fail;
+	}
+
+done:
+	return 0;
+
+fail:
+	return err;
+}
+
 static void connect(struct backend_info *be)
 {
 	int err;
@@ -861,6 +911,12 @@ static void connect(struct backend_info *be)
 	xen_register_watchers(dev, be->vif);
 	read_xenbus_vif_flags(be);
 
+	err = connect_ctrl_ring(be);
+	if (err) {
+		xenbus_dev_fatal(dev, err, "connecting control ring");
+		return;
+	}
+
 	/* Use the number of queues requested by the frontend */
 	be->vif->queues = vzalloc(requested_num_queues *
 				  sizeof(struct xenvif_queue));
@@ -896,11 +952,12 @@ static void connect(struct backend_info *be)
 		queue->remaining_credit = credit_bytes;
 		queue->credit_usec = credit_usec;
 
-		err = connect_rings(be, queue);
+		err = connect_data_rings(be, queue);
 		if (err) {
-			/* connect_rings() cleans up after itself on failure,
-			 * but we need to clean up after xenvif_init_queue() here,
-			 * and also clean up any previously initialised queues.
+			/* connect_data_rings() cleans up after itself on
+			 * failure, but we need to clean up after
+			 * xenvif_init_queue() here, and also clean up any
+			 * previously initialised queues.
 			 */
 			xenvif_deinit_queue(queue);
 			be->vif->num_queues = queue_index;
@@ -935,15 +992,17 @@ static void connect(struct backend_info *be)
 
 err:
 	if (be->vif->num_queues > 0)
-		xenvif_disconnect(be->vif); /* Clean up existing queues */
+		xenvif_disconnect_data(be->vif); /* Clean up existing queues */
 	vfree(be->vif->queues);
 	be->vif->queues = NULL;
 	be->vif->num_queues = 0;
+	xenvif_disconnect_ctrl(be->vif);
 	return;
 }
 
 
-static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
+static int connect_data_rings(struct backend_info *be,
+			      struct xenvif_queue *queue)
 {
 	struct xenbus_device *dev = be->dev;
 	unsigned int num_queues = queue->vif->num_queues;
@@ -1007,8 +1066,8 @@ static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
 	}
 
 	/* Map the shared frame, irq etc. */
-	err = xenvif_connect(queue, tx_ring_ref, rx_ring_ref,
-			     tx_evtchn, rx_evtchn);
+	err = xenvif_connect_data(queue, tx_ring_ref, rx_ring_ref,
+				  tx_evtchn, rx_evtchn);
 	if (err) {
 		xenbus_dev_fatal(dev, err,
 				 "mapping shared-frames %lu/%lu port tx %u rx %u",
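
For completeness, the frontend side of this handshake is not part of this patch (xen-netfront gained control-ring support in a separate series). The sketch below is a hypothetical illustration of what a frontend has to do so that connect_ctrl_ring() above finds the ring: the xenstore keys (feature-ctrl-ring, ctrl-ring-ref, event-channel-ctrl) come from the diff, while the helper choices and simplified error handling are assumptions, not the real xen-netfront code:

#include <linux/gfp.h>
#include <xen/xenbus.h>
#include <xen/interface/io/netif.h>

/* Hypothetical sketch: allocate and grant a page for the control ring,
 * then advertise it via the keys that connect_ctrl_ring() reads.
 */
static int sketch_frontend_setup_ctrl_ring(struct xenbus_device *dev)
{
	struct xen_netif_ctrl_sring *sring;
	grant_ref_t gref;
	int evtchn, err;

	sring = (struct xen_netif_ctrl_sring *)get_zeroed_page(GFP_KERNEL);
	if (!sring)
		return -ENOMEM;
	SHARED_RING_INIT(sring);

	/* Grant the backend access to the ring page. */
	err = xenbus_grant_ring(dev, sring, 1, &gref);
	if (err < 0)
		goto fail;

	/* Allocate the event channel used to kick the backend. */
	err = xenbus_alloc_evtchn(dev, &evtchn);
	if (err)
		goto fail;

	/* Publish both in xenstore; the backend reads these keys in
	 * connect_ctrl_ring().
	 */
	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "ctrl-ring-ref", "%u", gref);
	if (!err)
		err = xenbus_printf(XBT_NIL, dev->nodename,
				    "event-channel-ctrl", "%u", evtchn);
	if (err)
		goto fail;

	return 0;

fail:
	/* A real implementation would also revoke the grant and free the
	 * event channel here; omitted to keep the sketch short.
	 */
	free_page((unsigned long)sring);
	return err;
}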