author		Wolfram Sang <wsa@the-dreams.de>	2014-06-17 08:36:41 -0400
committer	Wolfram Sang <wsa@the-dreams.de>	2014-06-17 08:37:31 -0400
commit		f0b1f6442b5090fed3529cb39f3acf8c91693d3d (patch)
tree		bc5f62b017a82161c9a7f892f464813f6efd5bf3 /drivers/net/xen-netback/interface.c
parent		4632a93f015caf6d7db4352f37aab74a39e60d7a (diff)
parent		7171511eaec5bf23fb06078f59784a3a0626b38f (diff)

Merge tag 'v3.16-rc1' into i2c/for-next

Merge a stable base (Linux 3.16-rc1)

Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
Diffstat (limited to 'drivers/net/xen-netback/interface.c')
-rw-r--r--	drivers/net/xen-netback/interface.c	523
1 file changed, 338 insertions(+), 185 deletions(-)
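The interface.c delta below comes from the xen-netback multi-queue rework pulled in with the 3.16-rc1 base: state that used to live directly in struct xenvif (the NAPI instance, IRQs, kthreads, rings, counters) moves into a per-queue struct xenvif_queue, and the vif keeps an array of queues. For orientation, here is a minimal user-space C sketch of that split and of the aggregation it forces on per-interface statistics; the demo_* names are hypothetical, not the kernel's actual definitions:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical per-queue state, mirroring the xenvif_queue idea:
 * anything that must exist once per queue lives here.
 */
struct demo_queue {
	unsigned int id;
	unsigned long tx_packets;	/* per-queue counter */
};

/* The interface keeps only shared state plus an array of queues. */
struct demo_vif {
	unsigned int num_queues;
	struct demo_queue *queues;
};

/* Interface-wide totals are now a walk over the queues, just as
 * xenvif_get_stats() does in the diff below.
 */
static unsigned long demo_vif_tx_packets(const struct demo_vif *vif)
{
	unsigned long total = 0;
	unsigned int i;

	for (i = 0; i < vif->num_queues; i++)
		total += vif->queues[i].tx_packets;
	return total;
}

int main(void)
{
	struct demo_vif vif;
	unsigned int i;

	vif.num_queues = 4;
	vif.queues = calloc(vif.num_queues, sizeof(*vif.queues));
	if (!vif.queues)
		return 1;
	for (i = 0; i < vif.num_queues; i++) {
		vif.queues[i].id = i;
		vif.queues[i].tx_packets = 10 * (i + 1);
	}
	printf("total tx_packets: %lu\n", demo_vif_tx_packets(&vif));
	free(vif.queues);
	return 0;
}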
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 20e9defa1060..852da34b8961 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -43,6 +43,16 @@
 #define XENVIF_QUEUE_LENGTH 32
 #define XENVIF_NAPI_WEIGHT 64
 
+static inline void xenvif_stop_queue(struct xenvif_queue *queue)
+{
+	struct net_device *dev = queue->vif->dev;
+
+	if (!queue->vif->can_queue)
+		return;
+
+	netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
+}
+
 int xenvif_schedulable(struct xenvif *vif)
 {
 	return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
@@ -50,33 +60,34 @@ int xenvif_schedulable(struct xenvif *vif)
 
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
 {
-	struct xenvif *vif = dev_id;
+	struct xenvif_queue *queue = dev_id;
 
-	if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx))
-		napi_schedule(&vif->napi);
+	if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
+		napi_schedule(&queue->napi);
 
 	return IRQ_HANDLED;
 }
 
-static int xenvif_poll(struct napi_struct *napi, int budget)
+int xenvif_poll(struct napi_struct *napi, int budget)
 {
-	struct xenvif *vif = container_of(napi, struct xenvif, napi);
+	struct xenvif_queue *queue =
+		container_of(napi, struct xenvif_queue, napi);
 	int work_done;
 
 	/* This vif is rogue, we pretend we've there is nothing to do
 	 * for this vif to deschedule it from NAPI. But this interface
 	 * will be turned off in thread context later.
 	 */
-	if (unlikely(vif->disabled)) {
+	if (unlikely(queue->vif->disabled)) {
 		napi_complete(napi);
 		return 0;
 	}
 
-	work_done = xenvif_tx_action(vif, budget);
+	work_done = xenvif_tx_action(queue, budget);
 
 	if (work_done < budget) {
 		napi_complete(napi);
-		xenvif_napi_schedule_or_enable_events(vif);
+		xenvif_napi_schedule_or_enable_events(queue);
 	}
 
 	return work_done;
@@ -84,9 +95,9 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
 
 static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
-	struct xenvif *vif = dev_id;
+	struct xenvif_queue *queue = dev_id;
 
-	xenvif_kick_thread(vif);
+	xenvif_kick_thread(queue);
 
 	return IRQ_HANDLED;
 }
@@ -99,28 +110,80 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static void xenvif_wake_queue(unsigned long data)
+int xenvif_queue_stopped(struct xenvif_queue *queue)
 {
-	struct xenvif *vif = (struct xenvif *)data;
+	struct net_device *dev = queue->vif->dev;
+	unsigned int id = queue->id;
+	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
+}
 
-	if (netif_queue_stopped(vif->dev)) {
-		netdev_err(vif->dev, "draining TX queue\n");
-		vif->rx_queue_purge = true;
-		xenvif_kick_thread(vif);
-		netif_wake_queue(vif->dev);
+void xenvif_wake_queue(struct xenvif_queue *queue)
+{
+	struct net_device *dev = queue->vif->dev;
+	unsigned int id = queue->id;
+	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
+}
+
+/* Callback to wake the queue and drain it on timeout */
+static void xenvif_wake_queue_callback(unsigned long data)
+{
+	struct xenvif_queue *queue = (struct xenvif_queue *)data;
+
+	if (xenvif_queue_stopped(queue)) {
+		netdev_err(queue->vif->dev, "draining TX queue\n");
+		queue->rx_queue_purge = true;
+		xenvif_kick_thread(queue);
+		xenvif_wake_queue(queue);
 	}
 }
 
+static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
+			       void *accel_priv, select_queue_fallback_t fallback)
+{
+	unsigned int num_queues = dev->real_num_tx_queues;
+	u32 hash;
+	u16 queue_index;
+
+	/* First, check if there is only one queue to optimise the
+	 * single-queue or old frontend scenario.
+	 */
+	if (num_queues == 1) {
+		queue_index = 0;
+	} else {
+		/* Use skb_get_hash to obtain an L4 hash if available */
+		hash = skb_get_hash(skb);
+		queue_index = hash % num_queues;
+	}
+
+	return queue_index;
+}
+
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
+	struct xenvif_queue *queue = NULL;
+	unsigned int num_queues = dev->real_num_tx_queues;
+	u16 index;
 	int min_slots_needed;
 
 	BUG_ON(skb->dev != dev);
 
-	/* Drop the packet if vif is not ready */
-	if (vif->task == NULL ||
-	    vif->dealloc_task == NULL ||
+	/* Drop the packet if queues are not set up */
+	if (num_queues < 1)
+		goto drop;
+
+	/* Obtain the queue to be used to transmit this packet */
+	index = skb_get_queue_mapping(skb);
+	if (index >= num_queues) {
+		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n.",
+				    index, vif->dev->name);
+		index %= num_queues;
+	}
+	queue = &vif->queues[index];
+
+	/* Drop the packet if queue is not ready */
+	if (queue->task == NULL ||
+	    queue->dealloc_task == NULL ||
 	    !xenvif_schedulable(vif))
 		goto drop;
 
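xenvif_select_queue() above maps each skb to a queue with a flow hash taken modulo the number of queues, so all packets of one flow land on one queue and stay ordered. A standalone sketch of that mapping; demo_flow_hash() is a stand-in for skb_get_hash(), which in the kernel derives the hash from the packet's flow keys:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for skb_get_hash(): any stable per-flow hash works.
 * Here, a tiny mix over 5-tuple-like fields.
 */
static uint32_t demo_flow_hash(uint32_t saddr, uint32_t daddr,
			       uint16_t sport, uint16_t dport)
{
	uint32_t h = saddr ^ (daddr << 1) ^ (((uint32_t)sport << 16) | dport);

	h ^= h >> 16;
	h *= 0x45d9f3b;
	h ^= h >> 16;
	return h;
}

/* Same shape as xenvif_select_queue(): one queue short-circuits,
 * otherwise hash modulo queue count pins a flow to a queue.
 */
static uint16_t demo_select_queue(uint32_t hash, unsigned int num_queues)
{
	if (num_queues == 1)
		return 0;
	return (uint16_t)(hash % num_queues);
}

int main(void)
{
	uint32_t h = demo_flow_hash(0x0a000001, 0x0a000002, 12345, 80);

	/* The same flow always lands on the same queue. */
	printf("queue for flow: %u of 4\n", demo_select_queue(h, 4));
	printf("queue for flow: %u of 4\n", demo_select_queue(h, 4));
	return 0;
}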
@@ -139,16 +202,16 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * then turn off the queue to give the ring a chance to
 	 * drain.
 	 */
-	if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) {
-		vif->wake_queue.function = xenvif_wake_queue;
-		vif->wake_queue.data = (unsigned long)vif;
-		xenvif_stop_queue(vif);
-		mod_timer(&vif->wake_queue,
+	if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
+		queue->wake_queue.function = xenvif_wake_queue_callback;
+		queue->wake_queue.data = (unsigned long)queue;
+		xenvif_stop_queue(queue);
+		mod_timer(&queue->wake_queue,
 			jiffies + rx_drain_timeout_jiffies);
 	}
 
-	skb_queue_tail(&vif->rx_queue, skb);
-	xenvif_kick_thread(vif);
+	skb_queue_tail(&queue->rx_queue, skb);
+	xenvif_kick_thread(queue);
 
 	return NETDEV_TX_OK;
 
@@ -161,25 +224,65 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
+	struct xenvif_queue *queue = NULL;
+	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned long rx_bytes = 0;
+	unsigned long rx_packets = 0;
+	unsigned long tx_bytes = 0;
+	unsigned long tx_packets = 0;
+	unsigned int index;
+
+	if (vif->queues == NULL)
+		goto out;
+
+	/* Aggregate tx and rx stats from each queue */
+	for (index = 0; index < num_queues; ++index) {
+		queue = &vif->queues[index];
+		rx_bytes += queue->stats.rx_bytes;
+		rx_packets += queue->stats.rx_packets;
+		tx_bytes += queue->stats.tx_bytes;
+		tx_packets += queue->stats.tx_packets;
+	}
+
+out:
+	vif->dev->stats.rx_bytes = rx_bytes;
+	vif->dev->stats.rx_packets = rx_packets;
+	vif->dev->stats.tx_bytes = tx_bytes;
+	vif->dev->stats.tx_packets = tx_packets;
+
 	return &vif->dev->stats;
 }
 
 static void xenvif_up(struct xenvif *vif)
 {
-	napi_enable(&vif->napi);
-	enable_irq(vif->tx_irq);
-	if (vif->tx_irq != vif->rx_irq)
-		enable_irq(vif->rx_irq);
-	xenvif_napi_schedule_or_enable_events(vif);
+	struct xenvif_queue *queue = NULL;
+	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int queue_index;
+
+	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+		queue = &vif->queues[queue_index];
+		napi_enable(&queue->napi);
+		enable_irq(queue->tx_irq);
+		if (queue->tx_irq != queue->rx_irq)
+			enable_irq(queue->rx_irq);
+		xenvif_napi_schedule_or_enable_events(queue);
+	}
 }
 
 static void xenvif_down(struct xenvif *vif)
 {
-	napi_disable(&vif->napi);
-	disable_irq(vif->tx_irq);
-	if (vif->tx_irq != vif->rx_irq)
-		disable_irq(vif->rx_irq);
-	del_timer_sync(&vif->credit_timeout);
+	struct xenvif_queue *queue = NULL;
+	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int queue_index;
+
+	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+		queue = &vif->queues[queue_index];
+		napi_disable(&queue->napi);
+		disable_irq(queue->tx_irq);
+		if (queue->tx_irq != queue->rx_irq)
+			disable_irq(queue->rx_irq);
+		del_timer_sync(&queue->credit_timeout);
+	}
 }
 
 static int xenvif_open(struct net_device *dev)
@@ -187,7 +290,7 @@ static int xenvif_open(struct net_device *dev)
 	struct xenvif *vif = netdev_priv(dev);
 	if (netif_carrier_ok(dev))
 		xenvif_up(vif);
-	netif_start_queue(dev);
+	netif_tx_start_all_queues(dev);
 	return 0;
 }
 
@@ -196,7 +299,7 @@ static int xenvif_close(struct net_device *dev)
 	struct xenvif *vif = netdev_priv(dev);
 	if (netif_carrier_ok(dev))
 		xenvif_down(vif);
-	netif_stop_queue(dev);
+	netif_tx_stop_all_queues(dev);
 	return 0;
 }
 
@@ -236,29 +339,29 @@ static const struct xenvif_stat {
 } xenvif_stats[] = {
 	{
 		"rx_gso_checksum_fixup",
-		offsetof(struct xenvif, rx_gso_checksum_fixup)
+		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
 	},
 	/* If (sent != success + fail), there are probably packets never
 	 * freed up properly!
 	 */
 	{
 		"tx_zerocopy_sent",
-		offsetof(struct xenvif, tx_zerocopy_sent),
+		offsetof(struct xenvif_stats, tx_zerocopy_sent),
 	},
 	{
 		"tx_zerocopy_success",
-		offsetof(struct xenvif, tx_zerocopy_success),
+		offsetof(struct xenvif_stats, tx_zerocopy_success),
 	},
 	{
 		"tx_zerocopy_fail",
-		offsetof(struct xenvif, tx_zerocopy_fail)
+		offsetof(struct xenvif_stats, tx_zerocopy_fail)
 	},
 	/* Number of packets exceeding MAX_SKB_FRAG slots. You should use
 	 * a guest with the same MAX_SKB_FRAG
 	 */
 	{
 		"tx_frag_overflow",
-		offsetof(struct xenvif, tx_frag_overflow)
+		offsetof(struct xenvif_stats, tx_frag_overflow)
 	},
 };
 
@@ -275,11 +378,20 @@ static int xenvif_get_sset_count(struct net_device *dev, int string_set)
 static void xenvif_get_ethtool_stats(struct net_device *dev,
 				     struct ethtool_stats *stats, u64 * data)
 {
-	void *vif = netdev_priv(dev);
+	struct xenvif *vif = netdev_priv(dev);
+	unsigned int num_queues = dev->real_num_tx_queues;
 	int i;
+	unsigned int queue_index;
+	struct xenvif_stats *vif_stats;
 
-	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
-		data[i] = *(unsigned long *)(vif + xenvif_stats[i].offset);
+	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
+		unsigned long accum = 0;
+		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+			vif_stats = &vif->queues[queue_index].stats;
+			accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
+		}
+		data[i] = accum;
+	}
 }
 
 static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
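xenvif_get_ethtool_stats() above is table-driven: each named statistic carries an offsetof() into the per-queue stats struct, and the value is summed across queues. A self-contained sketch of the same technique; struct demo_stats and its fields are illustrative only. Note the byte-sized base pointer (unsigned char *): offsetof() yields an offset in bytes, so the offset must be applied before casting back to the field type:

#include <stddef.h>
#include <stdio.h>

struct demo_stats {
	unsigned long rx_packets;
	unsigned long tx_packets;
};

/* Table of name/offset pairs, like the xenvif_stats[] array. */
static const struct {
	const char *name;
	size_t offset;
} demo_stat_defs[] = {
	{ "rx_packets", offsetof(struct demo_stats, rx_packets) },
	{ "tx_packets", offsetof(struct demo_stats, tx_packets) },
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	struct demo_stats queues[3] = {
		{ .rx_packets = 1, .tx_packets = 2 },
		{ .rx_packets = 3, .tx_packets = 4 },
		{ .rx_packets = 5, .tx_packets = 6 },
	};
	size_t i, q;

	for (i = 0; i < ARRAY_SIZE(demo_stat_defs); i++) {
		unsigned long accum = 0;

		for (q = 0; q < ARRAY_SIZE(queues); q++) {
			/* Index from an unsigned char * so the stored
			 * offset is applied in bytes, then cast back to
			 * the field's type to read it.
			 */
			const unsigned char *base =
				(const unsigned char *)&queues[q];

			accum += *(const unsigned long *)
				 (base + demo_stat_defs[i].offset);
		}
		printf("%s: %lu\n", demo_stat_defs[i].name, accum);
	}
	return 0;
}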
@@ -312,6 +424,7 @@ static const struct net_device_ops xenvif_netdev_ops = {
 	.ndo_fix_features = xenvif_fix_features,
 	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_select_queue = xenvif_select_queue,
 };
 
 struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
@@ -321,10 +434,14 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	struct net_device *dev;
 	struct xenvif *vif;
 	char name[IFNAMSIZ] = {};
-	int i;
 
 	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
-	dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
+	/* Allocate a netdev with the max. supported number of queues.
+	 * When the guest selects the desired number, it will be updated
+	 * via netif_set_real_num_tx_queues().
+	 */
+	dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup,
+			      xenvif_max_queues);
 	if (dev == NULL) {
 		pr_warn("Could not allocate netdev for %s\n", name);
 		return ERR_PTR(-ENOMEM);
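The hunk above reserves capacity for xenvif_max_queues up front and lets netif_set_real_num_tx_queues() adjust the active count once the frontend negotiates, so a changed queue count never forces a netdev reallocation. A small sketch of that reserve-then-activate pattern, with hypothetical demo_* names standing in for the netdev machinery:

#include <stdio.h>
#include <stdlib.h>

#define DEMO_MAX_QUEUES 8	/* stand-in for xenvif_max_queues */

struct demo_dev {
	unsigned int real_num_queues;	/* currently active */
	int queues[DEMO_MAX_QUEUES];	/* capacity fixed at alloc time */
};

/* Allocate for the maximum once; activation later is just a counter
 * update, like netif_set_real_num_tx_queues() on an alloc_netdev_mq()
 * device.
 */
static struct demo_dev *demo_alloc_dev(void)
{
	struct demo_dev *dev = calloc(1, sizeof(*dev));

	if (dev)
		dev->real_num_queues = 0;	/* start with no queues */
	return dev;
}

static int demo_set_real_num_queues(struct demo_dev *dev, unsigned int n)
{
	if (n > DEMO_MAX_QUEUES)
		return -1;	/* cannot exceed reserved capacity */
	dev->real_num_queues = n;
	return 0;
}

int main(void)
{
	struct demo_dev *dev = demo_alloc_dev();

	if (!dev)
		return 1;
	demo_set_real_num_queues(dev, 4);	/* frontend picked 4 */
	printf("active queues: %u of %d\n",
	       dev->real_num_queues, DEMO_MAX_QUEUES);
	free(dev);
	return 0;
}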
@@ -334,66 +451,28 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 
 	vif = netdev_priv(dev);
 
-	vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
-				     MAX_GRANT_COPY_OPS);
-	if (vif->grant_copy_op == NULL) {
-		pr_warn("Could not allocate grant copy space for %s\n", name);
-		free_netdev(dev);
-		return ERR_PTR(-ENOMEM);
-	}
-
 	vif->domid = domid;
 	vif->handle = handle;
 	vif->can_sg = 1;
 	vif->ip_csum = 1;
 	vif->dev = dev;
-
 	vif->disabled = false;
 
-	vif->credit_bytes = vif->remaining_credit = ~0UL;
-	vif->credit_usec = 0UL;
-	init_timer(&vif->credit_timeout);
-	vif->credit_window_start = get_jiffies_64();
-
-	init_timer(&vif->wake_queue);
+	/* Start out with no queues. The call below does not require
+	 * rtnl_lock() as it happens before register_netdev().
+	 */
+	vif->queues = NULL;
+	netif_set_real_num_tx_queues(dev, 0);
 
 	dev->netdev_ops = &xenvif_netdev_ops;
 	dev->hw_features = NETIF_F_SG |
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 		NETIF_F_TSO | NETIF_F_TSO6;
 	dev->features = dev->hw_features | NETIF_F_RXCSUM;
-	SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
+	dev->ethtool_ops = &xenvif_ethtool_ops;
 
 	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
 
-	skb_queue_head_init(&vif->rx_queue);
-	skb_queue_head_init(&vif->tx_queue);
-
-	vif->pending_cons = 0;
-	vif->pending_prod = MAX_PENDING_REQS;
-	for (i = 0; i < MAX_PENDING_REQS; i++)
-		vif->pending_ring[i] = i;
-	spin_lock_init(&vif->callback_lock);
-	spin_lock_init(&vif->response_lock);
-	/* If ballooning is disabled, this will consume real memory, so you
-	 * better enable it. The long term solution would be to use just a
-	 * bunch of valid page descriptors, without dependency on ballooning
-	 */
-	err = alloc_xenballooned_pages(MAX_PENDING_REQS,
-				       vif->mmap_pages,
-				       false);
-	if (err) {
-		netdev_err(dev, "Could not reserve mmap_pages\n");
-		return ERR_PTR(-ENOMEM);
-	}
-	for (i = 0; i < MAX_PENDING_REQS; i++) {
-		vif->pending_tx_info[i].callback_struct = (struct ubuf_info)
-			{ .callback = xenvif_zerocopy_callback,
-			  .ctx = NULL,
-			  .desc = i };
-		vif->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
-	}
-
 	/*
 	 * Initialise a dummy MAC address. We choose the numerically
 	 * largest non-broadcast address to prevent the address getting
@@ -403,8 +482,6 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	memset(dev->dev_addr, 0xFF, ETH_ALEN);
 	dev->dev_addr[0] &= ~0x01;
 
-	netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT);
-
 	netif_carrier_off(dev);
 
 	err = register_netdev(dev);
@@ -421,98 +498,147 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	return vif;
 }
 
-int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
+int xenvif_init_queue(struct xenvif_queue *queue)
+{
+	int err, i;
+
+	queue->credit_bytes = queue->remaining_credit = ~0UL;
+	queue->credit_usec = 0UL;
+	init_timer(&queue->credit_timeout);
+	queue->credit_window_start = get_jiffies_64();
+
+	skb_queue_head_init(&queue->rx_queue);
+	skb_queue_head_init(&queue->tx_queue);
+
+	queue->pending_cons = 0;
+	queue->pending_prod = MAX_PENDING_REQS;
+	for (i = 0; i < MAX_PENDING_REQS; ++i)
+		queue->pending_ring[i] = i;
+
+	spin_lock_init(&queue->callback_lock);
+	spin_lock_init(&queue->response_lock);
+
+	/* If ballooning is disabled, this will consume real memory, so you
+	 * better enable it. The long term solution would be to use just a
+	 * bunch of valid page descriptors, without dependency on ballooning
+	 */
+	err = alloc_xenballooned_pages(MAX_PENDING_REQS,
+				       queue->mmap_pages,
+				       false);
+	if (err) {
+		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < MAX_PENDING_REQS; i++) {
+		queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
+			{ .callback = xenvif_zerocopy_callback,
+			  .ctx = NULL,
+			  .desc = i };
+		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
+	}
+
+	init_timer(&queue->wake_queue);
+
+	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
+		       XENVIF_NAPI_WEIGHT);
+
+	return 0;
+}
+
+void xenvif_carrier_on(struct xenvif *vif)
+{
+	rtnl_lock();
+	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
+		dev_set_mtu(vif->dev, ETH_DATA_LEN);
+	netdev_update_features(vif->dev);
+	netif_carrier_on(vif->dev);
+	if (netif_running(vif->dev))
+		xenvif_up(vif);
+	rtnl_unlock();
+}
+
+int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
 		   unsigned long rx_ring_ref, unsigned int tx_evtchn,
 		   unsigned int rx_evtchn)
 {
 	struct task_struct *task;
 	int err = -ENOMEM;
 
-	BUG_ON(vif->tx_irq);
-	BUG_ON(vif->task);
-	BUG_ON(vif->dealloc_task);
+	BUG_ON(queue->tx_irq);
+	BUG_ON(queue->task);
+	BUG_ON(queue->dealloc_task);
 
-	err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
+	err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
 	if (err < 0)
 		goto err;
 
-	init_waitqueue_head(&vif->wq);
-	init_waitqueue_head(&vif->dealloc_wq);
+	init_waitqueue_head(&queue->wq);
+	init_waitqueue_head(&queue->dealloc_wq);
 
 	if (tx_evtchn == rx_evtchn) {
 		/* feature-split-event-channels == 0 */
 		err = bind_interdomain_evtchn_to_irqhandler(
-			vif->domid, tx_evtchn, xenvif_interrupt, 0,
-			vif->dev->name, vif);
+			queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
+			queue->name, queue);
 		if (err < 0)
 			goto err_unmap;
-		vif->tx_irq = vif->rx_irq = err;
-		disable_irq(vif->tx_irq);
+		queue->tx_irq = queue->rx_irq = err;
+		disable_irq(queue->tx_irq);
 	} else {
 		/* feature-split-event-channels == 1 */
-		snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name),
-			 "%s-tx", vif->dev->name);
+		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
+			 "%s-tx", queue->name);
 		err = bind_interdomain_evtchn_to_irqhandler(
-			vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
-			vif->tx_irq_name, vif);
+			queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
+			queue->tx_irq_name, queue);
 		if (err < 0)
 			goto err_unmap;
-		vif->tx_irq = err;
-		disable_irq(vif->tx_irq);
+		queue->tx_irq = err;
+		disable_irq(queue->tx_irq);
 
-		snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name),
-			 "%s-rx", vif->dev->name);
+		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
+			 "%s-rx", queue->name);
 		err = bind_interdomain_evtchn_to_irqhandler(
-			vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
-			vif->rx_irq_name, vif);
+			queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
+			queue->rx_irq_name, queue);
 		if (err < 0)
 			goto err_tx_unbind;
-		vif->rx_irq = err;
-		disable_irq(vif->rx_irq);
+		queue->rx_irq = err;
+		disable_irq(queue->rx_irq);
 	}
 
 	task = kthread_create(xenvif_kthread_guest_rx,
-			      (void *)vif, "%s-guest-rx", vif->dev->name);
+			      (void *)queue, "%s-guest-rx", queue->name);
 	if (IS_ERR(task)) {
-		pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
+		pr_warn("Could not allocate kthread for %s\n", queue->name);
 		err = PTR_ERR(task);
 		goto err_rx_unbind;
 	}
-
-	vif->task = task;
+	queue->task = task;
 
 	task = kthread_create(xenvif_dealloc_kthread,
-			      (void *)vif, "%s-dealloc", vif->dev->name);
+			      (void *)queue, "%s-dealloc", queue->name);
 	if (IS_ERR(task)) {
-		pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
+		pr_warn("Could not allocate kthread for %s\n", queue->name);
 		err = PTR_ERR(task);
 		goto err_rx_unbind;
 	}
+	queue->dealloc_task = task;
 
-	vif->dealloc_task = task;
-
-	rtnl_lock();
-	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
-		dev_set_mtu(vif->dev, ETH_DATA_LEN);
-	netdev_update_features(vif->dev);
-	netif_carrier_on(vif->dev);
-	if (netif_running(vif->dev))
-		xenvif_up(vif);
-	rtnl_unlock();
-
-	wake_up_process(vif->task);
-	wake_up_process(vif->dealloc_task);
+	wake_up_process(queue->task);
+	wake_up_process(queue->dealloc_task);
 
 	return 0;
 
 err_rx_unbind:
-	unbind_from_irqhandler(vif->rx_irq, vif);
-	vif->rx_irq = 0;
+	unbind_from_irqhandler(queue->rx_irq, queue);
+	queue->rx_irq = 0;
 err_tx_unbind:
-	unbind_from_irqhandler(vif->tx_irq, vif);
-	vif->tx_irq = 0;
+	unbind_from_irqhandler(queue->tx_irq, queue);
+	queue->tx_irq = 0;
 err_unmap:
-	xenvif_unmap_frontend_rings(vif);
+	xenvif_unmap_frontend_rings(queue);
 err:
 	module_put(THIS_MODULE);
 	return err;
@@ -529,38 +655,77 @@ void xenvif_carrier_off(struct xenvif *vif)
 	rtnl_unlock();
 }
 
+static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
+				      unsigned int worst_case_skb_lifetime)
+{
+	int i, unmap_timeout = 0;
+
+	for (i = 0; i < MAX_PENDING_REQS; ++i) {
+		if (queue->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
+			unmap_timeout++;
+			schedule_timeout(msecs_to_jiffies(1000));
+			if (unmap_timeout > worst_case_skb_lifetime &&
+			    net_ratelimit())
+				netdev_err(queue->vif->dev,
+					   "Page still granted! Index: %x\n",
+					   i);
+			i = -1;
+		}
+	}
+}
+
 void xenvif_disconnect(struct xenvif *vif)
 {
+	struct xenvif_queue *queue = NULL;
+	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int queue_index;
+
 	if (netif_carrier_ok(vif->dev))
 		xenvif_carrier_off(vif);
 
-	if (vif->task) {
-		del_timer_sync(&vif->wake_queue);
-		kthread_stop(vif->task);
-		vif->task = NULL;
-	}
-
-	if (vif->dealloc_task) {
-		kthread_stop(vif->dealloc_task);
-		vif->dealloc_task = NULL;
-	}
-
-	if (vif->tx_irq) {
-		if (vif->tx_irq == vif->rx_irq)
-			unbind_from_irqhandler(vif->tx_irq, vif);
-		else {
-			unbind_from_irqhandler(vif->tx_irq, vif);
-			unbind_from_irqhandler(vif->rx_irq, vif);
-		}
-		vif->tx_irq = 0;
-	}
-
-	xenvif_unmap_frontend_rings(vif);
-}
+	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+		queue = &vif->queues[queue_index];
+
+		if (queue->task) {
+			del_timer_sync(&queue->wake_queue);
+			kthread_stop(queue->task);
+			queue->task = NULL;
+		}
+
+		if (queue->dealloc_task) {
+			kthread_stop(queue->dealloc_task);
+			queue->dealloc_task = NULL;
+		}
+
+		if (queue->tx_irq) {
+			if (queue->tx_irq == queue->rx_irq)
+				unbind_from_irqhandler(queue->tx_irq, queue);
+			else {
+				unbind_from_irqhandler(queue->tx_irq, queue);
+				unbind_from_irqhandler(queue->rx_irq, queue);
+			}
+			queue->tx_irq = 0;
+		}
+
+		xenvif_unmap_frontend_rings(queue);
+	}
+}
+
+/* Reverse the relevant parts of xenvif_init_queue().
+ * Used for queue teardown from xenvif_free(), and on the
+ * error handling paths in xenbus.c:connect().
+ */
+void xenvif_deinit_queue(struct xenvif_queue *queue)
+{
+	free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
+	netif_napi_del(&queue->napi);
+}
 
 void xenvif_free(struct xenvif *vif)
 {
-	int i, unmap_timeout = 0;
+	struct xenvif_queue *queue = NULL;
+	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int queue_index;
 	/* Here we want to avoid timeout messages if an skb can be legitimately
 	 * stuck somewhere else. Realistically this could be an another vif's
 	 * internal or QDisc queue. That another vif also has this
@@ -575,33 +740,21 @@ void xenvif_free(struct xenvif *vif)
 	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
 		DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
 
-	for (i = 0; i < MAX_PENDING_REQS; ++i) {
-		if (vif->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
-			unmap_timeout++;
-			schedule_timeout(msecs_to_jiffies(1000));
-			if (unmap_timeout > worst_case_skb_lifetime &&
-			    net_ratelimit())
-				netdev_err(vif->dev,
-					   "Page still granted! Index: %x\n",
-					   i);
-			/* If there are still unmapped pages, reset the loop to
-			 * start checking again. We shouldn't exit here until
-			 * dealloc thread and NAPI instance release all the
-			 * pages. If a kernel bug causes the skbs to stall
-			 * somewhere, the interface cannot be brought down
-			 * properly.
-			 */
-			i = -1;
-		}
-	}
-
-	free_xenballooned_pages(MAX_PENDING_REQS, vif->mmap_pages);
+	unregister_netdev(vif->dev);
 
-	netif_napi_del(&vif->napi);
+	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+		queue = &vif->queues[queue_index];
+		xenvif_wait_unmap_timeout(queue, worst_case_skb_lifetime);
+		xenvif_deinit_queue(queue);
+	}
 
-	unregister_netdev(vif->dev);
+	/* Free the array of queues. The call below does not require
+	 * rtnl_lock() because it happens after unregister_netdev().
+	 */
+	netif_set_real_num_tx_queues(vif->dev, 0);
+	vfree(vif->queues);
+	vif->queues = NULL;
 
-	vfree(vif->grant_copy_op);
 	free_netdev(vif->dev);
 
 	module_put(THIS_MODULE);
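One subtlety worth noting in xenvif_wait_unmap_timeout() above: after sleeping on a still-granted slot it sets i = -1, so the loop's i++ restarts the scan from slot 0, and the function returns only once a full pass finds every handle invalid. A standalone sketch of that restart-the-scan idiom; the demo_* names are hypothetical, and demo_poll_release() stands in for the dealloc thread releasing grants in the background:

#include <stdio.h>

#define DEMO_SLOTS 4
#define DEMO_INVALID_HANDLE (-1)

/* Two slots start out busy; pretend external activity releases one
 * slot per poll.
 */
static int demo_handles[DEMO_SLOTS] = {
	DEMO_INVALID_HANDLE, 7, DEMO_INVALID_HANDLE, 9
};

static void demo_poll_release(void)
{
	int i;

	for (i = 0; i < DEMO_SLOTS; i++) {
		if (demo_handles[i] != DEMO_INVALID_HANDLE) {
			demo_handles[i] = DEMO_INVALID_HANDLE;
			return;
		}
	}
}

int main(void)
{
	int i;

	for (i = 0; i < DEMO_SLOTS; ++i) {
		if (demo_handles[i] != DEMO_INVALID_HANDLE) {
			/* Busy slot: wait for the releaser, then rewind
			 * so the i++ restarts the scan from slot 0. We
			 * fall out of the loop only after one clean pass
			 * over every slot.
			 */
			printf("slot %d still busy, rescanning\n", i);
			demo_poll_release();
			i = -1;
		}
	}
	printf("all slots released\n");
	return 0;
}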