Diffstat (limited to 'drivers/net/xen-netback/interface.c')
-rw-r--r--  drivers/net/xen-netback/interface.c  522
1 file changed, 312 insertions(+), 210 deletions(-)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index ef05c5c49d41..9e97c7ca0ddd 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -43,6 +43,16 @@
 #define XENVIF_QUEUE_LENGTH 32
 #define XENVIF_NAPI_WEIGHT 64
 
+static inline void xenvif_stop_queue(struct xenvif_queue *queue)
+{
+	struct net_device *dev = queue->vif->dev;
+
+	if (!queue->vif->can_queue)
+		return;
+
+	netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
+}
+
 int xenvif_schedulable(struct xenvif *vif)
 {
 	return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
@@ -50,57 +60,34 @@ int xenvif_schedulable(struct xenvif *vif)
 
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
 {
-	struct xenvif *vif = dev_id;
+	struct xenvif_queue *queue = dev_id;
 
-	if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx))
-		napi_schedule(&vif->napi);
+	if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
+		napi_schedule(&queue->napi);
 
 	return IRQ_HANDLED;
 }
 
-static int xenvif_poll(struct napi_struct *napi, int budget)
+int xenvif_poll(struct napi_struct *napi, int budget)
 {
-	struct xenvif *vif = container_of(napi, struct xenvif, napi);
+	struct xenvif_queue *queue =
+		container_of(napi, struct xenvif_queue, napi);
 	int work_done;
 
 	/* This vif is rogue, we pretend we've there is nothing to do
 	 * for this vif to deschedule it from NAPI. But this interface
 	 * will be turned off in thread context later.
 	 */
-	if (unlikely(vif->disabled)) {
+	if (unlikely(queue->vif->disabled)) {
 		napi_complete(napi);
 		return 0;
 	}
 
-	work_done = xenvif_tx_action(vif, budget);
+	work_done = xenvif_tx_action(queue, budget);
 
 	if (work_done < budget) {
-		int more_to_do = 0;
-		unsigned long flags;
-
-		/* It is necessary to disable IRQ before calling
-		 * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
-		 * lose event from the frontend.
-		 *
-		 * Consider:
-		 *   RING_HAS_UNCONSUMED_REQUESTS
-		 *   <frontend generates event to trigger napi_schedule>
-		 *   __napi_complete
-		 *
-		 * This handler is still in scheduled state so the
-		 * event has no effect at all. After __napi_complete
-		 * this handler is descheduled and cannot get
-		 * scheduled again. We lose event in this case and the ring
-		 * will be completely stalled.
-		 */
-
-		local_irq_save(flags);
-
-		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
-		if (!more_to_do)
-			__napi_complete(napi);
-
-		local_irq_restore(flags);
+		napi_complete(napi);
+		xenvif_napi_schedule_or_enable_events(queue);
 	}
 
 	return work_done;
@@ -108,9 +95,9 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
 
 static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
-	struct xenvif *vif = dev_id;
+	struct xenvif_queue *queue = dev_id;
 
-	xenvif_kick_thread(vif);
+	xenvif_kick_thread(queue);
 
 	return IRQ_HANDLED;
 }
@@ -123,28 +110,59 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static void xenvif_wake_queue(unsigned long data)
+int xenvif_queue_stopped(struct xenvif_queue *queue)
+{
+	struct net_device *dev = queue->vif->dev;
+	unsigned int id = queue->id;
+	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
+}
+
+void xenvif_wake_queue(struct xenvif_queue *queue)
 {
-	struct xenvif *vif = (struct xenvif *)data;
+	struct net_device *dev = queue->vif->dev;
+	unsigned int id = queue->id;
+	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
+}
+
+/* Callback to wake the queue and drain it on timeout */
+static void xenvif_wake_queue_callback(unsigned long data)
+{
+	struct xenvif_queue *queue = (struct xenvif_queue *)data;
 
-	if (netif_queue_stopped(vif->dev)) {
-		netdev_err(vif->dev, "draining TX queue\n");
-		vif->rx_queue_purge = true;
-		xenvif_kick_thread(vif);
-		netif_wake_queue(vif->dev);
+	if (xenvif_queue_stopped(queue)) {
+		netdev_err(queue->vif->dev, "draining TX queue\n");
+		queue->rx_queue_purge = true;
+		xenvif_kick_thread(queue);
+		xenvif_wake_queue(queue);
 	}
 }
 
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
+	struct xenvif_queue *queue = NULL;
+	unsigned int num_queues = vif->num_queues;
+	u16 index;
 	int min_slots_needed;
 
 	BUG_ON(skb->dev != dev);
 
-	/* Drop the packet if vif is not ready */
-	if (vif->task == NULL ||
-	    vif->dealloc_task == NULL ||
+	/* Drop the packet if queues are not set up */
+	if (num_queues < 1)
+		goto drop;
+
+	/* Obtain the queue to be used to transmit this packet */
+	index = skb_get_queue_mapping(skb);
+	if (index >= num_queues) {
+		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n.",
+				    index, vif->dev->name);
+		index %= num_queues;
+	}
+	queue = &vif->queues[index];
+
+	/* Drop the packet if queue is not ready */
+	if (queue->task == NULL ||
+	    queue->dealloc_task == NULL ||
 	    !xenvif_schedulable(vif))
 		goto drop;
 
@@ -163,16 +181,16 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * then turn off the queue to give the ring a chance to
 	 * drain.
 	 */
-	if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) {
-		vif->wake_queue.function = xenvif_wake_queue;
-		vif->wake_queue.data = (unsigned long)vif;
-		xenvif_stop_queue(vif);
-		mod_timer(&vif->wake_queue,
+	if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
+		queue->wake_queue.function = xenvif_wake_queue_callback;
+		queue->wake_queue.data = (unsigned long)queue;
+		xenvif_stop_queue(queue);
+		mod_timer(&queue->wake_queue,
 			jiffies + rx_drain_timeout_jiffies);
 	}
 
-	skb_queue_tail(&vif->rx_queue, skb);
-	xenvif_kick_thread(vif);
+	skb_queue_tail(&queue->rx_queue, skb);
+	xenvif_kick_thread(queue);
 
 	return NETDEV_TX_OK;
 
@@ -185,25 +203,65 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
+	struct xenvif_queue *queue = NULL;
+	unsigned int num_queues = vif->num_queues;
+	unsigned long rx_bytes = 0;
+	unsigned long rx_packets = 0;
+	unsigned long tx_bytes = 0;
+	unsigned long tx_packets = 0;
+	unsigned int index;
+
+	if (vif->queues == NULL)
+		goto out;
+
+	/* Aggregate tx and rx stats from each queue */
+	for (index = 0; index < num_queues; ++index) {
+		queue = &vif->queues[index];
+		rx_bytes += queue->stats.rx_bytes;
+		rx_packets += queue->stats.rx_packets;
+		tx_bytes += queue->stats.tx_bytes;
+		tx_packets += queue->stats.tx_packets;
+	}
+
+out:
+	vif->dev->stats.rx_bytes = rx_bytes;
+	vif->dev->stats.rx_packets = rx_packets;
+	vif->dev->stats.tx_bytes = tx_bytes;
+	vif->dev->stats.tx_packets = tx_packets;
+
 	return &vif->dev->stats;
 }
 
 static void xenvif_up(struct xenvif *vif)
 {
-	napi_enable(&vif->napi);
-	enable_irq(vif->tx_irq);
-	if (vif->tx_irq != vif->rx_irq)
-		enable_irq(vif->rx_irq);
-	xenvif_check_rx_xenvif(vif);
+	struct xenvif_queue *queue = NULL;
+	unsigned int num_queues = vif->num_queues;
+	unsigned int queue_index;
+
+	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+		queue = &vif->queues[queue_index];
+		napi_enable(&queue->napi);
+		enable_irq(queue->tx_irq);
+		if (queue->tx_irq != queue->rx_irq)
+			enable_irq(queue->rx_irq);
+		xenvif_napi_schedule_or_enable_events(queue);
+	}
 }
 
 static void xenvif_down(struct xenvif *vif)
 {
-	napi_disable(&vif->napi);
-	disable_irq(vif->tx_irq);
-	if (vif->tx_irq != vif->rx_irq)
-		disable_irq(vif->rx_irq);
-	del_timer_sync(&vif->credit_timeout);
+	struct xenvif_queue *queue = NULL;
+	unsigned int num_queues = vif->num_queues;
+	unsigned int queue_index;
+
+	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+		queue = &vif->queues[queue_index];
+		napi_disable(&queue->napi);
+		disable_irq(queue->tx_irq);
+		if (queue->tx_irq != queue->rx_irq)
+			disable_irq(queue->rx_irq);
+		del_timer_sync(&queue->credit_timeout);
+	}
 }
 
 static int xenvif_open(struct net_device *dev)
@@ -211,7 +269,7 @@ static int xenvif_open(struct net_device *dev)
 	struct xenvif *vif = netdev_priv(dev);
 	if (netif_carrier_ok(dev))
 		xenvif_up(vif);
-	netif_start_queue(dev);
+	netif_tx_start_all_queues(dev);
 	return 0;
 }
 
@@ -220,7 +278,7 @@ static int xenvif_close(struct net_device *dev)
 	struct xenvif *vif = netdev_priv(dev);
 	if (netif_carrier_ok(dev))
 		xenvif_down(vif);
-	netif_stop_queue(dev);
+	netif_tx_stop_all_queues(dev);
 	return 0;
 }
 
@@ -260,29 +318,29 @@ static const struct xenvif_stat {
 } xenvif_stats[] = {
 	{
 		"rx_gso_checksum_fixup",
-		offsetof(struct xenvif, rx_gso_checksum_fixup)
+		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
 	},
 	/* If (sent != success + fail), there are probably packets never
 	 * freed up properly!
 	 */
 	{
 		"tx_zerocopy_sent",
-		offsetof(struct xenvif, tx_zerocopy_sent),
+		offsetof(struct xenvif_stats, tx_zerocopy_sent),
 	},
 	{
 		"tx_zerocopy_success",
-		offsetof(struct xenvif, tx_zerocopy_success),
+		offsetof(struct xenvif_stats, tx_zerocopy_success),
 	},
 	{
 		"tx_zerocopy_fail",
-		offsetof(struct xenvif, tx_zerocopy_fail)
+		offsetof(struct xenvif_stats, tx_zerocopy_fail)
 	},
 	/* Number of packets exceeding MAX_SKB_FRAG slots. You should use
 	 * a guest with the same MAX_SKB_FRAG
 	 */
 	{
 		"tx_frag_overflow",
-		offsetof(struct xenvif, tx_frag_overflow)
+		offsetof(struct xenvif_stats, tx_frag_overflow)
 	},
 };
 
@@ -299,11 +357,20 @@ static int xenvif_get_sset_count(struct net_device *dev, int string_set)
 static void xenvif_get_ethtool_stats(struct net_device *dev,
 				     struct ethtool_stats *stats, u64 * data)
 {
-	void *vif = netdev_priv(dev);
+	struct xenvif *vif = netdev_priv(dev);
+	unsigned int num_queues = vif->num_queues;
 	int i;
-
-	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
-		data[i] = *(unsigned long *)(vif + xenvif_stats[i].offset);
+	unsigned int queue_index;
+	struct xenvif_stats *vif_stats;
+
+	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
+		unsigned long accum = 0;
+		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+			vif_stats = &vif->queues[queue_index].stats;
+			accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
+		}
+		data[i] = accum;
+	}
 }
 
 static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
@@ -345,10 +412,14 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	struct net_device *dev;
 	struct xenvif *vif;
 	char name[IFNAMSIZ] = {};
-	int i;
 
 	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
-	dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
+	/* Allocate a netdev with the max. supported number of queues.
+	 * When the guest selects the desired number, it will be updated
+	 * via netif_set_real_num_*_queues().
+	 */
+	dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup,
+			      xenvif_max_queues);
 	if (dev == NULL) {
 		pr_warn("Could not allocate netdev for %s\n", name);
 		return ERR_PTR(-ENOMEM);
@@ -358,66 +429,26 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 
 	vif = netdev_priv(dev);
 
-	vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
-				     MAX_GRANT_COPY_OPS);
-	if (vif->grant_copy_op == NULL) {
-		pr_warn("Could not allocate grant copy space for %s\n", name);
-		free_netdev(dev);
-		return ERR_PTR(-ENOMEM);
-	}
-
 	vif->domid = domid;
 	vif->handle = handle;
 	vif->can_sg = 1;
 	vif->ip_csum = 1;
 	vif->dev = dev;
-
 	vif->disabled = false;
 
-	vif->credit_bytes = vif->remaining_credit = ~0UL;
-	vif->credit_usec = 0UL;
-	init_timer(&vif->credit_timeout);
-	vif->credit_window_start = get_jiffies_64();
-
-	init_timer(&vif->wake_queue);
+	/* Start out with no queues. */
+	vif->queues = NULL;
+	vif->num_queues = 0;
 
 	dev->netdev_ops = &xenvif_netdev_ops;
 	dev->hw_features = NETIF_F_SG |
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 		NETIF_F_TSO | NETIF_F_TSO6;
 	dev->features = dev->hw_features | NETIF_F_RXCSUM;
-	SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
+	dev->ethtool_ops = &xenvif_ethtool_ops;
 
 	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
 
-	skb_queue_head_init(&vif->rx_queue);
-	skb_queue_head_init(&vif->tx_queue);
-
-	vif->pending_cons = 0;
-	vif->pending_prod = MAX_PENDING_REQS;
-	for (i = 0; i < MAX_PENDING_REQS; i++)
-		vif->pending_ring[i] = i;
-	spin_lock_init(&vif->callback_lock);
-	spin_lock_init(&vif->response_lock);
-	/* If ballooning is disabled, this will consume real memory, so you
-	 * better enable it. The long term solution would be to use just a
-	 * bunch of valid page descriptors, without dependency on ballooning
-	 */
-	err = alloc_xenballooned_pages(MAX_PENDING_REQS,
-				       vif->mmap_pages,
-				       false);
-	if (err) {
-		netdev_err(dev, "Could not reserve mmap_pages\n");
-		return ERR_PTR(-ENOMEM);
-	}
-	for (i = 0; i < MAX_PENDING_REQS; i++) {
-		vif->pending_tx_info[i].callback_struct = (struct ubuf_info)
-			{ .callback = xenvif_zerocopy_callback,
-			  .ctx = NULL,
-			  .desc = i };
-		vif->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
-	}
-
 	/*
 	 * Initialise a dummy MAC address. We choose the numerically
 	 * largest non-broadcast address to prevent the address getting
@@ -427,8 +458,6 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	memset(dev->dev_addr, 0xFF, ETH_ALEN);
 	dev->dev_addr[0] &= ~0x01;
 
-	netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT);
-
 	netif_carrier_off(dev);
 
 	err = register_netdev(dev);
@@ -445,98 +474,147 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	return vif;
 }
 
-int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
+int xenvif_init_queue(struct xenvif_queue *queue)
+{
+	int err, i;
+
+	queue->credit_bytes = queue->remaining_credit = ~0UL;
+	queue->credit_usec = 0UL;
+	init_timer(&queue->credit_timeout);
+	queue->credit_window_start = get_jiffies_64();
+
+	skb_queue_head_init(&queue->rx_queue);
+	skb_queue_head_init(&queue->tx_queue);
+
+	queue->pending_cons = 0;
+	queue->pending_prod = MAX_PENDING_REQS;
+	for (i = 0; i < MAX_PENDING_REQS; ++i)
+		queue->pending_ring[i] = i;
+
+	spin_lock_init(&queue->callback_lock);
+	spin_lock_init(&queue->response_lock);
+
+	/* If ballooning is disabled, this will consume real memory, so you
+	 * better enable it. The long term solution would be to use just a
+	 * bunch of valid page descriptors, without dependency on ballooning
+	 */
+	err = alloc_xenballooned_pages(MAX_PENDING_REQS,
+				       queue->mmap_pages,
+				       false);
+	if (err) {
+		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < MAX_PENDING_REQS; i++) {
+		queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
+			{ .callback = xenvif_zerocopy_callback,
+			  .ctx = NULL,
+			  .desc = i };
+		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
+	}
+
+	init_timer(&queue->wake_queue);
+
+	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
+			XENVIF_NAPI_WEIGHT);
+
+	return 0;
+}
+
+void xenvif_carrier_on(struct xenvif *vif)
+{
+	rtnl_lock();
+	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
+		dev_set_mtu(vif->dev, ETH_DATA_LEN);
+	netdev_update_features(vif->dev);
+	netif_carrier_on(vif->dev);
+	if (netif_running(vif->dev))
+		xenvif_up(vif);
+	rtnl_unlock();
+}
+
+int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
 		   unsigned long rx_ring_ref, unsigned int tx_evtchn,
 		   unsigned int rx_evtchn)
 {
 	struct task_struct *task;
 	int err = -ENOMEM;
 
-	BUG_ON(vif->tx_irq);
-	BUG_ON(vif->task);
-	BUG_ON(vif->dealloc_task);
+	BUG_ON(queue->tx_irq);
+	BUG_ON(queue->task);
+	BUG_ON(queue->dealloc_task);
 
-	err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
+	err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
 	if (err < 0)
 		goto err;
 
-	init_waitqueue_head(&vif->wq);
-	init_waitqueue_head(&vif->dealloc_wq);
+	init_waitqueue_head(&queue->wq);
+	init_waitqueue_head(&queue->dealloc_wq);
 
 	if (tx_evtchn == rx_evtchn) {
 		/* feature-split-event-channels == 0 */
 		err = bind_interdomain_evtchn_to_irqhandler(
-			vif->domid, tx_evtchn, xenvif_interrupt, 0,
-			vif->dev->name, vif);
+			queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
+			queue->name, queue);
 		if (err < 0)
 			goto err_unmap;
-		vif->tx_irq = vif->rx_irq = err;
-		disable_irq(vif->tx_irq);
+		queue->tx_irq = queue->rx_irq = err;
+		disable_irq(queue->tx_irq);
 	} else {
 		/* feature-split-event-channels == 1 */
-		snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name),
-			 "%s-tx", vif->dev->name);
+		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
+			 "%s-tx", queue->name);
 		err = bind_interdomain_evtchn_to_irqhandler(
-			vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
-			vif->tx_irq_name, vif);
+			queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
+			queue->tx_irq_name, queue);
 		if (err < 0)
 			goto err_unmap;
-		vif->tx_irq = err;
-		disable_irq(vif->tx_irq);
+		queue->tx_irq = err;
+		disable_irq(queue->tx_irq);
 
-		snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name),
-			 "%s-rx", vif->dev->name);
+		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
+			 "%s-rx", queue->name);
 		err = bind_interdomain_evtchn_to_irqhandler(
-			vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
-			vif->rx_irq_name, vif);
+			queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
+			queue->rx_irq_name, queue);
 		if (err < 0)
 			goto err_tx_unbind;
-		vif->rx_irq = err;
-		disable_irq(vif->rx_irq);
+		queue->rx_irq = err;
+		disable_irq(queue->rx_irq);
 	}
 
 	task = kthread_create(xenvif_kthread_guest_rx,
-			      (void *)vif, "%s-guest-rx", vif->dev->name);
+			      (void *)queue, "%s-guest-rx", queue->name);
 	if (IS_ERR(task)) {
-		pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
+		pr_warn("Could not allocate kthread for %s\n", queue->name);
 		err = PTR_ERR(task);
 		goto err_rx_unbind;
 	}
-
-	vif->task = task;
+	queue->task = task;
 
 	task = kthread_create(xenvif_dealloc_kthread,
-			      (void *)vif, "%s-dealloc", vif->dev->name);
+			      (void *)queue, "%s-dealloc", queue->name);
 	if (IS_ERR(task)) {
-		pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
+		pr_warn("Could not allocate kthread for %s\n", queue->name);
 		err = PTR_ERR(task);
 		goto err_rx_unbind;
 	}
+	queue->dealloc_task = task;
 
-	vif->dealloc_task = task;
-
-	rtnl_lock();
-	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
-		dev_set_mtu(vif->dev, ETH_DATA_LEN);
-	netdev_update_features(vif->dev);
-	netif_carrier_on(vif->dev);
-	if (netif_running(vif->dev))
-		xenvif_up(vif);
-	rtnl_unlock();
-
-	wake_up_process(vif->task);
-	wake_up_process(vif->dealloc_task);
+	wake_up_process(queue->task);
+	wake_up_process(queue->dealloc_task);
 
 	return 0;
 
 err_rx_unbind:
-	unbind_from_irqhandler(vif->rx_irq, vif);
-	vif->rx_irq = 0;
+	unbind_from_irqhandler(queue->rx_irq, queue);
+	queue->rx_irq = 0;
 err_tx_unbind:
-	unbind_from_irqhandler(vif->tx_irq, vif);
-	vif->tx_irq = 0;
+	unbind_from_irqhandler(queue->tx_irq, queue);
+	queue->tx_irq = 0;
 err_unmap:
-	xenvif_unmap_frontend_rings(vif);
+	xenvif_unmap_frontend_rings(queue);
 err:
 	module_put(THIS_MODULE);
 	return err;
@@ -553,38 +631,77 @@ void xenvif_carrier_off(struct xenvif *vif)
 	rtnl_unlock();
 }
 
+static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
+				      unsigned int worst_case_skb_lifetime)
+{
+	int i, unmap_timeout = 0;
+
+	for (i = 0; i < MAX_PENDING_REQS; ++i) {
+		if (queue->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
+			unmap_timeout++;
+			schedule_timeout(msecs_to_jiffies(1000));
+			if (unmap_timeout > worst_case_skb_lifetime &&
+			    net_ratelimit())
+				netdev_err(queue->vif->dev,
+					   "Page still granted! Index: %x\n",
+					   i);
+			i = -1;
+		}
+	}
+}
+
 void xenvif_disconnect(struct xenvif *vif)
 {
+	struct xenvif_queue *queue = NULL;
+	unsigned int num_queues = vif->num_queues;
+	unsigned int queue_index;
+
 	if (netif_carrier_ok(vif->dev))
 		xenvif_carrier_off(vif);
 
-	if (vif->task) {
-		del_timer_sync(&vif->wake_queue);
-		kthread_stop(vif->task);
-		vif->task = NULL;
-	}
+	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+		queue = &vif->queues[queue_index];
 
-	if (vif->dealloc_task) {
-		kthread_stop(vif->dealloc_task);
-		vif->dealloc_task = NULL;
-	}
+		if (queue->task) {
+			del_timer_sync(&queue->wake_queue);
+			kthread_stop(queue->task);
+			queue->task = NULL;
+		}
 
-	if (vif->tx_irq) {
-		if (vif->tx_irq == vif->rx_irq)
-			unbind_from_irqhandler(vif->tx_irq, vif);
-		else {
-			unbind_from_irqhandler(vif->tx_irq, vif);
-			unbind_from_irqhandler(vif->rx_irq, vif);
+		if (queue->dealloc_task) {
+			kthread_stop(queue->dealloc_task);
+			queue->dealloc_task = NULL;
 		}
-		vif->tx_irq = 0;
+
+		if (queue->tx_irq) {
+			if (queue->tx_irq == queue->rx_irq)
+				unbind_from_irqhandler(queue->tx_irq, queue);
+			else {
+				unbind_from_irqhandler(queue->tx_irq, queue);
+				unbind_from_irqhandler(queue->rx_irq, queue);
+			}
+			queue->tx_irq = 0;
+		}
+
+		xenvif_unmap_frontend_rings(queue);
 	}
+}
 
-	xenvif_unmap_frontend_rings(vif);
+/* Reverse the relevant parts of xenvif_init_queue().
+ * Used for queue teardown from xenvif_free(), and on the
+ * error handling paths in xenbus.c:connect().
+ */
+void xenvif_deinit_queue(struct xenvif_queue *queue)
+{
+	free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
+	netif_napi_del(&queue->napi);
 }
 
 void xenvif_free(struct xenvif *vif)
 {
-	int i, unmap_timeout = 0;
+	struct xenvif_queue *queue = NULL;
+	unsigned int num_queues = vif->num_queues;
+	unsigned int queue_index;
 	/* Here we want to avoid timeout messages if an skb can be legitimately
 	 * stuck somewhere else. Realistically this could be an another vif's
 	 * internal or QDisc queue. That another vif also has this
@@ -599,33 +716,18 @@ void xenvif_free(struct xenvif *vif)
 	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
 		DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
 
-	for (i = 0; i < MAX_PENDING_REQS; ++i) {
-		if (vif->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
-			unmap_timeout++;
-			schedule_timeout(msecs_to_jiffies(1000));
-			if (unmap_timeout > worst_case_skb_lifetime &&
-			    net_ratelimit())
-				netdev_err(vif->dev,
-					   "Page still granted! Index: %x\n",
-					   i);
-			/* If there are still unmapped pages, reset the loop to
-			 * start checking again. We shouldn't exit here until
-			 * dealloc thread and NAPI instance release all the
-			 * pages. If a kernel bug causes the skbs to stall
-			 * somewhere, the interface cannot be brought down
-			 * properly.
-			 */
-			i = -1;
-		}
-	}
-
-	free_xenballooned_pages(MAX_PENDING_REQS, vif->mmap_pages);
+	unregister_netdev(vif->dev);
 
-	netif_napi_del(&vif->napi);
+	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+		queue = &vif->queues[queue_index];
+		xenvif_wait_unmap_timeout(queue, worst_case_skb_lifetime);
+		xenvif_deinit_queue(queue);
+	}
 
-	unregister_netdev(vif->dev);
+	vfree(vif->queues);
+	vif->queues = NULL;
+	vif->num_queues = 0;
 
-	vfree(vif->grant_copy_op);
 	free_netdev(vif->dev);
 
 	module_put(THIS_MODULE);