Diffstat (limited to 'drivers/net/xen-netfront.c')
 drivers/net/xen-netfront.c | 1164
 1 file changed, 746 insertions(+), 418 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 158b5e639fc7..2ccb4a02368b 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -57,6 +57,12 @@
 #include <xen/interface/memory.h>
 #include <xen/interface/grant_table.h>
 
+/* Module parameters */
+static unsigned int xennet_max_queues;
+module_param_named(max_queues, xennet_max_queues, uint, 0644);
+MODULE_PARM_DESC(max_queues,
+		 "Maximum number of queues per virtual interface");
+
 static const struct ethtool_ops xennet_ethtool_ops;
 
 struct netfront_cb {
@@ -73,6 +79,12 @@ struct netfront_cb {
 #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
 #define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
 
+/* Queue name is interface name with "-qNNN" appended */
+#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
+
+/* IRQ name is queue name with "-tx" or "-rx" appended */
+#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
+
 struct netfront_stats {
 	u64 rx_packets;
 	u64 tx_packets;
@@ -81,9 +93,12 @@ struct netfront_stats {
 	struct u64_stats_sync syncp;
 };
 
-struct netfront_info {
-	struct list_head list;
-	struct net_device *netdev;
+struct netfront_info;
+
+struct netfront_queue {
+	unsigned int id; /* Queue ID, 0-based */
+	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
+	struct netfront_info *info;
 
 	struct napi_struct napi;
 
@@ -93,10 +108,8 @@ struct netfront_info {
 	unsigned int tx_evtchn, rx_evtchn;
 	unsigned int tx_irq, rx_irq;
 	/* Only used when split event channels support is enabled */
-	char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
-	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
-
-	struct xenbus_device *xbdev;
+	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
+	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
 
 	spinlock_t tx_lock;
 	struct xen_netif_tx_front_ring tx;
@@ -140,11 +153,21 @@ struct netfront_info {
 	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
 	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
 	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
+};
+
+struct netfront_info {
+	struct list_head list;
+	struct net_device *netdev;
+
+	struct xenbus_device *xbdev;
+
+	/* Multi-queue support */
+	struct netfront_queue *queues;
 
 	/* Statistics */
 	struct netfront_stats __percpu *stats;
 
-	unsigned long rx_gso_checksum_fixup;
+	atomic_t rx_gso_checksum_fixup;
 };
 
 struct netfront_rx_info {
@@ -187,21 +210,21 @@ static int xennet_rxidx(RING_IDX idx)
 	return idx & (NET_RX_RING_SIZE - 1);
 }
 
-static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
+static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
 					 RING_IDX ri)
 {
 	int i = xennet_rxidx(ri);
-	struct sk_buff *skb = np->rx_skbs[i];
-	np->rx_skbs[i] = NULL;
+	struct sk_buff *skb = queue->rx_skbs[i];
+	queue->rx_skbs[i] = NULL;
 	return skb;
 }
 
-static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
+static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
 				     RING_IDX ri)
 {
 	int i = xennet_rxidx(ri);
-	grant_ref_t ref = np->grant_rx_ref[i];
-	np->grant_rx_ref[i] = GRANT_INVALID_REF;
+	grant_ref_t ref = queue->grant_rx_ref[i];
+	queue->grant_rx_ref[i] = GRANT_INVALID_REF;
 	return ref;
 }
 
@@ -221,41 +244,40 @@ static bool xennet_can_sg(struct net_device *dev)
 
 static void rx_refill_timeout(unsigned long data)
 {
-	struct net_device *dev = (struct net_device *)data;
-	struct netfront_info *np = netdev_priv(dev);
-	napi_schedule(&np->napi);
+	struct netfront_queue *queue = (struct netfront_queue *)data;
+	napi_schedule(&queue->napi);
 }
 
-static int netfront_tx_slot_available(struct netfront_info *np)
+static int netfront_tx_slot_available(struct netfront_queue *queue)
 {
-	return (np->tx.req_prod_pvt - np->tx.rsp_cons) <
+	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
 		(TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
 }
 
-static void xennet_maybe_wake_tx(struct net_device *dev)
+static void xennet_maybe_wake_tx(struct netfront_queue *queue)
 {
-	struct netfront_info *np = netdev_priv(dev);
+	struct net_device *dev = queue->info->netdev;
+	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
 
-	if (unlikely(netif_queue_stopped(dev)) &&
-	    netfront_tx_slot_available(np) &&
+	if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
+	    netfront_tx_slot_available(queue) &&
 	    likely(netif_running(dev)))
-		netif_wake_queue(dev);
+		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
 }
 
-static void xennet_alloc_rx_buffers(struct net_device *dev)
+static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 {
 	unsigned short id;
-	struct netfront_info *np = netdev_priv(dev);
 	struct sk_buff *skb;
 	struct page *page;
 	int i, batch_target, notify;
-	RING_IDX req_prod = np->rx.req_prod_pvt;
+	RING_IDX req_prod = queue->rx.req_prod_pvt;
 	grant_ref_t ref;
 	unsigned long pfn;
 	void *vaddr;
 	struct xen_netif_rx_request *req;
 
-	if (unlikely(!netif_carrier_ok(dev)))
+	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
 		return;
 
 	/*
@@ -264,9 +286,10 @@ static void xennet_alloc_rx_buffers(struct net_device *dev)
 	 * allocator, so should reduce the chance of failed allocation requests
 	 * both for ourself and for other kernel subsystems.
 	 */
-	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
-	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
-		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
+	batch_target = queue->rx_target - (req_prod - queue->rx.rsp_cons);
+	for (i = skb_queue_len(&queue->rx_batch); i < batch_target; i++) {
+		skb = __netdev_alloc_skb(queue->info->netdev,
+					 RX_COPY_THRESHOLD + NET_IP_ALIGN,
 					 GFP_ATOMIC | __GFP_NOWARN);
 		if (unlikely(!skb))
 			goto no_skb;
@@ -279,7 +302,7 @@ static void xennet_alloc_rx_buffers(struct net_device *dev)
 			kfree_skb(skb);
 no_skb:
 			/* Could not allocate any skbuffs. Try again later. */
-			mod_timer(&np->rx_refill_timer,
+			mod_timer(&queue->rx_refill_timer,
 				  jiffies + (HZ/10));
 
 			/* Any skbuffs queued for refill? Force them out. */
@@ -289,44 +312,44 @@ no_skb:
 		}
 
 		skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
-		__skb_queue_tail(&np->rx_batch, skb);
+		__skb_queue_tail(&queue->rx_batch, skb);
 	}
 
 	/* Is the batch large enough to be worthwhile? */
-	if (i < (np->rx_target/2)) {
-		if (req_prod > np->rx.sring->req_prod)
+	if (i < (queue->rx_target/2)) {
+		if (req_prod > queue->rx.sring->req_prod)
 			goto push;
 		return;
 	}
 
 	/* Adjust our fill target if we risked running out of buffers. */
-	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
-	    ((np->rx_target *= 2) > np->rx_max_target))
-		np->rx_target = np->rx_max_target;
+	if (((req_prod - queue->rx.sring->rsp_prod) < (queue->rx_target / 4)) &&
+	    ((queue->rx_target *= 2) > queue->rx_max_target))
+		queue->rx_target = queue->rx_max_target;
 
  refill:
 	for (i = 0; ; i++) {
-		skb = __skb_dequeue(&np->rx_batch);
+		skb = __skb_dequeue(&queue->rx_batch);
 		if (skb == NULL)
 			break;
 
-		skb->dev = dev;
+		skb->dev = queue->info->netdev;
 
 		id = xennet_rxidx(req_prod + i);
 
-		BUG_ON(np->rx_skbs[id]);
-		np->rx_skbs[id] = skb;
+		BUG_ON(queue->rx_skbs[id]);
+		queue->rx_skbs[id] = skb;
 
-		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
+		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
 		BUG_ON((signed short)ref < 0);
-		np->grant_rx_ref[id] = ref;
+		queue->grant_rx_ref[id] = ref;
 
 		pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
 		vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));
 
-		req = RING_GET_REQUEST(&np->rx, req_prod + i);
+		req = RING_GET_REQUEST(&queue->rx, req_prod + i);
 		gnttab_grant_foreign_access_ref(ref,
-						np->xbdev->otherend_id,
+						queue->info->xbdev->otherend_id,
 						pfn_to_mfn(pfn),
 						0);
 
@@ -337,72 +360,77 @@ no_skb:
 	wmb();		/* barrier so backend seens requests */
 
 	/* Above is a suitable barrier to ensure backend will see requests. */
-	np->rx.req_prod_pvt = req_prod + i;
+	queue->rx.req_prod_pvt = req_prod + i;
  push:
-	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
+	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
 	if (notify)
-		notify_remote_via_irq(np->rx_irq);
+		notify_remote_via_irq(queue->rx_irq);
 }
 
 static int xennet_open(struct net_device *dev)
 {
 	struct netfront_info *np = netdev_priv(dev);
-
-	napi_enable(&np->napi);
-
-	spin_lock_bh(&np->rx_lock);
-	if (netif_carrier_ok(dev)) {
-		xennet_alloc_rx_buffers(dev);
-		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
-		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
-			napi_schedule(&np->napi);
+	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned int i = 0;
+	struct netfront_queue *queue = NULL;
+
+	for (i = 0; i < num_queues; ++i) {
+		queue = &np->queues[i];
+		napi_enable(&queue->napi);
+
+		spin_lock_bh(&queue->rx_lock);
+		if (netif_carrier_ok(dev)) {
+			xennet_alloc_rx_buffers(queue);
+			queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
+			if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
+				napi_schedule(&queue->napi);
+		}
+		spin_unlock_bh(&queue->rx_lock);
 	}
-	spin_unlock_bh(&np->rx_lock);
 
-	netif_start_queue(dev);
+	netif_tx_start_all_queues(dev);
 
 	return 0;
 }
 
-static void xennet_tx_buf_gc(struct net_device *dev)
+static void xennet_tx_buf_gc(struct netfront_queue *queue)
 {
 	RING_IDX cons, prod;
 	unsigned short id;
-	struct netfront_info *np = netdev_priv(dev);
 	struct sk_buff *skb;
 
-	BUG_ON(!netif_carrier_ok(dev));
+	BUG_ON(!netif_carrier_ok(queue->info->netdev));
 
 	do {
-		prod = np->tx.sring->rsp_prod;
+		prod = queue->tx.sring->rsp_prod;
 		rmb(); /* Ensure we see responses up to 'rp'. */
 
-		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
+		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
 			struct xen_netif_tx_response *txrsp;
 
-			txrsp = RING_GET_RESPONSE(&np->tx, cons);
+			txrsp = RING_GET_RESPONSE(&queue->tx, cons);
 			if (txrsp->status == XEN_NETIF_RSP_NULL)
 				continue;
 
 			id = txrsp->id;
-			skb = np->tx_skbs[id].skb;
+			skb = queue->tx_skbs[id].skb;
 			if (unlikely(gnttab_query_foreign_access(
-				np->grant_tx_ref[id]) != 0)) {
+				queue->grant_tx_ref[id]) != 0)) {
 				pr_alert("%s: warning -- grant still in use by backend domain\n",
 					 __func__);
 				BUG();
 			}
 			gnttab_end_foreign_access_ref(
-				np->grant_tx_ref[id], GNTMAP_readonly);
+				queue->grant_tx_ref[id], GNTMAP_readonly);
 			gnttab_release_grant_reference(
-				&np->gref_tx_head, np->grant_tx_ref[id]);
-			np->grant_tx_ref[id] = GRANT_INVALID_REF;
-			np->grant_tx_page[id] = NULL;
-			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
+				&queue->gref_tx_head, queue->grant_tx_ref[id]);
+			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
+			queue->grant_tx_page[id] = NULL;
+			add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
 			dev_kfree_skb_irq(skb);
 		}
 
-		np->tx.rsp_cons = prod;
+		queue->tx.rsp_cons = prod;
 
 		/*
 		 * Set a new event, then check for race with update of tx_cons.
@@ -412,21 +440,20 @@ static void xennet_tx_buf_gc(struct net_device *dev)
 		 * data is outstanding: in such cases notification from Xen is
 		 * likely to be the only kick that we'll get.
 		 */
-		np->tx.sring->rsp_event =
-			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
+		queue->tx.sring->rsp_event =
+			prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1;
 		mb();		/* update shared area */
-	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
+	} while ((cons == prod) && (prod != queue->tx.sring->rsp_prod));
 
-	xennet_maybe_wake_tx(dev);
+	xennet_maybe_wake_tx(queue);
 }
 
-static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
+static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
 			      struct xen_netif_tx_request *tx)
 {
-	struct netfront_info *np = netdev_priv(dev);
 	char *data = skb->data;
 	unsigned long mfn;
-	RING_IDX prod = np->tx.req_prod_pvt;
+	RING_IDX prod = queue->tx.req_prod_pvt;
 	int frags = skb_shinfo(skb)->nr_frags;
 	unsigned int offset = offset_in_page(data);
 	unsigned int len = skb_headlen(skb);
@@ -443,19 +470,19 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 		data += tx->size;
 		offset = 0;
 
-		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
-		np->tx_skbs[id].skb = skb_get(skb);
-		tx = RING_GET_REQUEST(&np->tx, prod++);
+		id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
+		queue->tx_skbs[id].skb = skb_get(skb);
+		tx = RING_GET_REQUEST(&queue->tx, prod++);
 		tx->id = id;
-		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+		ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
 		BUG_ON((signed short)ref < 0);
 
 		mfn = virt_to_mfn(data);
-		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
+		gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
 						mfn, GNTMAP_readonly);
 
-		np->grant_tx_page[id] = virt_to_page(data);
-		tx->gref = np->grant_tx_ref[id] = ref;
+		queue->grant_tx_page[id] = virt_to_page(data);
+		tx->gref = queue->grant_tx_ref[id] = ref;
 		tx->offset = offset;
 		tx->size = len;
 		tx->flags = 0;
@@ -487,21 +514,21 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 
 		tx->flags |= XEN_NETTXF_more_data;
 
-		id = get_id_from_freelist(&np->tx_skb_freelist,
-					  np->tx_skbs);
-		np->tx_skbs[id].skb = skb_get(skb);
-		tx = RING_GET_REQUEST(&np->tx, prod++);
+		id = get_id_from_freelist(&queue->tx_skb_freelist,
+					  queue->tx_skbs);
+		queue->tx_skbs[id].skb = skb_get(skb);
+		tx = RING_GET_REQUEST(&queue->tx, prod++);
 		tx->id = id;
-		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+		ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
 		BUG_ON((signed short)ref < 0);
 
 		mfn = pfn_to_mfn(page_to_pfn(page));
 		gnttab_grant_foreign_access_ref(ref,
-						np->xbdev->otherend_id,
+						queue->info->xbdev->otherend_id,
 						mfn, GNTMAP_readonly);
 
-		np->grant_tx_page[id] = page;
-		tx->gref = np->grant_tx_ref[id] = ref;
+		queue->grant_tx_page[id] = page;
+		tx->gref = queue->grant_tx_ref[id] = ref;
 		tx->offset = offset;
 		tx->size = bytes;
 		tx->flags = 0;
@@ -518,7 +545,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 		}
 	}
 
-	np->tx.req_prod_pvt = prod;
+	queue->tx.req_prod_pvt = prod;
 }
 
 /*
@@ -544,6 +571,24 @@ static int xennet_count_skb_frag_slots(struct sk_buff *skb)
 	return pages;
 }
 
+static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
+			       void *accel_priv, select_queue_fallback_t fallback)
+{
+	unsigned int num_queues = dev->real_num_tx_queues;
+	u32 hash;
+	u16 queue_idx;
+
+	/* First, check if there is only one queue */
+	if (num_queues == 1) {
+		queue_idx = 0;
+	} else {
+		hash = skb_get_hash(skb);
+		queue_idx = hash % num_queues;
+	}
+
+	return queue_idx;
+}
+
 static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	unsigned short id;
@@ -559,6 +604,16 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned int offset = offset_in_page(data);
 	unsigned int len = skb_headlen(skb);
 	unsigned long flags;
+	struct netfront_queue *queue = NULL;
+	unsigned int num_queues = dev->real_num_tx_queues;
+	u16 queue_index;
+
+	/* Drop the packet if no queues are set up */
+	if (num_queues < 1)
+		goto drop;
+	/* Determine which queue to transmit this SKB on */
+	queue_index = skb_get_queue_mapping(skb);
+	queue = &np->queues[queue_index];
 
 	/* If skb->len is too big for wire format, drop skb and alert
 	 * user about misconfiguration.
@@ -578,30 +633,30 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto drop;
 	}
 
-	spin_lock_irqsave(&np->tx_lock, flags);
+	spin_lock_irqsave(&queue->tx_lock, flags);
 
 	if (unlikely(!netif_carrier_ok(dev) ||
 		     (slots > 1 && !xennet_can_sg(dev)) ||
 		     netif_needs_gso(skb, netif_skb_features(skb)))) {
-		spin_unlock_irqrestore(&np->tx_lock, flags);
+		spin_unlock_irqrestore(&queue->tx_lock, flags);
 		goto drop;
 	}
 
-	i = np->tx.req_prod_pvt;
+	i = queue->tx.req_prod_pvt;
 
-	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
-	np->tx_skbs[id].skb = skb;
+	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
+	queue->tx_skbs[id].skb = skb;
 
-	tx = RING_GET_REQUEST(&np->tx, i);
+	tx = RING_GET_REQUEST(&queue->tx, i);
 
 	tx->id = id;
-	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
 	BUG_ON((signed short)ref < 0);
 	mfn = virt_to_mfn(data);
 	gnttab_grant_foreign_access_ref(
-		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
-	np->grant_tx_page[id] = virt_to_page(data);
-	tx->gref = np->grant_tx_ref[id] = ref;
+		ref, queue->info->xbdev->otherend_id, mfn, GNTMAP_readonly);
+	queue->grant_tx_page[id] = virt_to_page(data);
+	tx->gref = queue->grant_tx_ref[id] = ref;
 	tx->offset = offset;
 	tx->size = len;
 
@@ -617,7 +672,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		struct xen_netif_extra_info *gso;
 
 		gso = (struct xen_netif_extra_info *)
-			RING_GET_REQUEST(&np->tx, ++i);
+			RING_GET_REQUEST(&queue->tx, ++i);
 
 		tx->flags |= XEN_NETTXF_extra_info;
 
@@ -632,14 +687,14 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		gso->flags = 0;
 	}
 
-	np->tx.req_prod_pvt = i + 1;
+	queue->tx.req_prod_pvt = i + 1;
 
-	xennet_make_frags(skb, dev, tx);
+	xennet_make_frags(skb, queue, tx);
 	tx->size = skb->len;
 
-	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
+	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
 	if (notify)
-		notify_remote_via_irq(np->tx_irq);
+		notify_remote_via_irq(queue->tx_irq);
 
 	u64_stats_update_begin(&stats->syncp);
 	stats->tx_bytes += skb->len;
@@ -647,12 +702,12 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	u64_stats_update_end(&stats->syncp);
 
 	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
-	xennet_tx_buf_gc(dev);
+	xennet_tx_buf_gc(queue);
 
-	if (!netfront_tx_slot_available(np))
-		netif_stop_queue(dev);
+	if (!netfront_tx_slot_available(queue))
+		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
 
-	spin_unlock_irqrestore(&np->tx_lock, flags);
+	spin_unlock_irqrestore(&queue->tx_lock, flags);
 
 	return NETDEV_TX_OK;
 
@@ -665,32 +720,38 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static int xennet_close(struct net_device *dev)
 {
 	struct netfront_info *np = netdev_priv(dev);
-	netif_stop_queue(np->netdev);
-	napi_disable(&np->napi);
+	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned int i;
+	struct netfront_queue *queue;
+	netif_tx_stop_all_queues(np->netdev);
+	for (i = 0; i < num_queues; ++i) {
+		queue = &np->queues[i];
+		napi_disable(&queue->napi);
+	}
 	return 0;
 }
 
-static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
+static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
 				grant_ref_t ref)
 {
-	int new = xennet_rxidx(np->rx.req_prod_pvt);
+	int new = xennet_rxidx(queue->rx.req_prod_pvt);
 
-	BUG_ON(np->rx_skbs[new]);
-	np->rx_skbs[new] = skb;
-	np->grant_rx_ref[new] = ref;
-	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
-	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
-	np->rx.req_prod_pvt++;
+	BUG_ON(queue->rx_skbs[new]);
+	queue->rx_skbs[new] = skb;
+	queue->grant_rx_ref[new] = ref;
+	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
+	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
+	queue->rx.req_prod_pvt++;
 }
 
-static int xennet_get_extras(struct netfront_info *np,
+static int xennet_get_extras(struct netfront_queue *queue,
 			     struct xen_netif_extra_info *extras,
 			     RING_IDX rp)
 
 {
 	struct xen_netif_extra_info *extra;
-	struct device *dev = &np->netdev->dev;
-	RING_IDX cons = np->rx.rsp_cons;
+	struct device *dev = &queue->info->netdev->dev;
+	RING_IDX cons = queue->rx.rsp_cons;
 	int err = 0;
 
 	do {
@@ -705,7 +766,7 @@ static int xennet_get_extras(struct netfront_info *np,
 		}
 
 		extra = (struct xen_netif_extra_info *)
-			RING_GET_RESPONSE(&np->rx, ++cons);
+			RING_GET_RESPONSE(&queue->rx, ++cons);
 
 		if (unlikely(!extra->type ||
 			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
@@ -718,33 +779,33 @@ static int xennet_get_extras(struct netfront_info *np,
 			       sizeof(*extra));
 		}
 
-		skb = xennet_get_rx_skb(np, cons);
-		ref = xennet_get_rx_ref(np, cons);
-		xennet_move_rx_slot(np, skb, ref);
+		skb = xennet_get_rx_skb(queue, cons);
+		ref = xennet_get_rx_ref(queue, cons);
+		xennet_move_rx_slot(queue, skb, ref);
 	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
 
-	np->rx.rsp_cons = cons;
+	queue->rx.rsp_cons = cons;
 	return err;
 }
 
-static int xennet_get_responses(struct netfront_info *np,
+static int xennet_get_responses(struct netfront_queue *queue,
 				struct netfront_rx_info *rinfo, RING_IDX rp,
 				struct sk_buff_head *list)
 {
 	struct xen_netif_rx_response *rx = &rinfo->rx;
 	struct xen_netif_extra_info *extras = rinfo->extras;
-	struct device *dev = &np->netdev->dev;
-	RING_IDX cons = np->rx.rsp_cons;
-	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
-	grant_ref_t ref = xennet_get_rx_ref(np, cons);
+	struct device *dev = &queue->info->netdev->dev;
+	RING_IDX cons = queue->rx.rsp_cons;
+	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
+	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
 	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
 	int slots = 1;
 	int err = 0;
 	unsigned long ret;
 
 	if (rx->flags & XEN_NETRXF_extra_info) {
-		err = xennet_get_extras(np, extras, rp);
-		cons = np->rx.rsp_cons;
+		err = xennet_get_extras(queue, extras, rp);
+		cons = queue->rx.rsp_cons;
 	}
 
 	for (;;) {
@@ -753,7 +814,7 @@ static int xennet_get_responses(struct netfront_info *np,
 			if (net_ratelimit())
 				dev_warn(dev, "rx->offset: %x, size: %u\n",
 					 rx->offset, rx->status);
-			xennet_move_rx_slot(np, skb, ref);
+			xennet_move_rx_slot(queue, skb, ref);
 			err = -EINVAL;
 			goto next;
 		}
@@ -774,7 +835,7 @@ static int xennet_get_responses(struct netfront_info *np,
 		ret = gnttab_end_foreign_access_ref(ref, 0);
 		BUG_ON(!ret);
 
-		gnttab_release_grant_reference(&np->gref_rx_head, ref);
+		gnttab_release_grant_reference(&queue->gref_rx_head, ref);
 
 		__skb_queue_tail(list, skb);
 
@@ -789,9 +850,9 @@ next:
 			break;
 		}
 
-		rx = RING_GET_RESPONSE(&np->rx, cons + slots);
-		skb = xennet_get_rx_skb(np, cons + slots);
-		ref = xennet_get_rx_ref(np, cons + slots);
+		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
+		skb = xennet_get_rx_skb(queue, cons + slots);
+		ref = xennet_get_rx_ref(queue, cons + slots);
 		slots++;
 	}
 
@@ -802,7 +863,7 @@ next:
 	}
 
 	if (unlikely(err))
-		np->rx.rsp_cons = cons + slots;
+		queue->rx.rsp_cons = cons + slots;
 
 	return err;
 }
@@ -836,17 +897,17 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
 	return 0;
 }
 
-static RING_IDX xennet_fill_frags(struct netfront_info *np,
+static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 				  struct sk_buff *skb,
 				  struct sk_buff_head *list)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
-	RING_IDX cons = np->rx.rsp_cons;
+	RING_IDX cons = queue->rx.rsp_cons;
 	struct sk_buff *nskb;
 
 	while ((nskb = __skb_dequeue(list))) {
 		struct xen_netif_rx_response *rx =
-			RING_GET_RESPONSE(&np->rx, ++cons);
+			RING_GET_RESPONSE(&queue->rx, ++cons);
 		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 
 		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
@@ -879,7 +940,7 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 	 */
 	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
 		struct netfront_info *np = netdev_priv(dev);
-		np->rx_gso_checksum_fixup++;
+		atomic_inc(&np->rx_gso_checksum_fixup);
 		skb->ip_summed = CHECKSUM_PARTIAL;
 		recalculate_partial_csum = true;
 	}
@@ -891,11 +952,10 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 	return skb_checksum_setup(skb, recalculate_partial_csum);
 }
 
-static int handle_incoming_queue(struct net_device *dev,
+static int handle_incoming_queue(struct netfront_queue *queue,
 				 struct sk_buff_head *rxq)
 {
-	struct netfront_info *np = netdev_priv(dev);
-	struct netfront_stats *stats = this_cpu_ptr(np->stats);
+	struct netfront_stats *stats = this_cpu_ptr(queue->info->stats);
 	int packets_dropped = 0;
 	struct sk_buff *skb;
 
@@ -906,13 +966,13 @@ static int handle_incoming_queue(struct net_device *dev,
 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 
 		/* Ethernet work: Delayed to here as it peeks the header. */
-		skb->protocol = eth_type_trans(skb, dev);
+		skb->protocol = eth_type_trans(skb, queue->info->netdev);
 		skb_reset_network_header(skb);
 
-		if (checksum_setup(dev, skb)) {
+		if (checksum_setup(queue->info->netdev, skb)) {
 			kfree_skb(skb);
 			packets_dropped++;
-			dev->stats.rx_errors++;
+			queue->info->netdev->stats.rx_errors++;
 			continue;
 		}
 
@@ -922,7 +982,7 @@ static int handle_incoming_queue(struct net_device *dev,
 		u64_stats_update_end(&stats->syncp);
 
 		/* Pass it up. */
-		napi_gro_receive(&np->napi, skb);
+		napi_gro_receive(&queue->napi, skb);
 	}
 
 	return packets_dropped;
@@ -930,8 +990,8 @@ static int handle_incoming_queue(struct net_device *dev,
 
 static int xennet_poll(struct napi_struct *napi, int budget)
 {
-	struct netfront_info *np = container_of(napi, struct netfront_info, napi);
-	struct net_device *dev = np->netdev;
+	struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
+	struct net_device *dev = queue->info->netdev;
 	struct sk_buff *skb;
 	struct netfront_rx_info rinfo;
 	struct xen_netif_rx_response *rx = &rinfo.rx;
@@ -944,29 +1004,29 @@ static int xennet_poll(struct napi_struct *napi, int budget)
 	unsigned long flags;
 	int err;
 
-	spin_lock(&np->rx_lock);
+	spin_lock(&queue->rx_lock);
 
 	skb_queue_head_init(&rxq);
 	skb_queue_head_init(&errq);
 	skb_queue_head_init(&tmpq);
 
-	rp = np->rx.sring->rsp_prod;
+	rp = queue->rx.sring->rsp_prod;
 	rmb(); /* Ensure we see queued responses up to 'rp'. */
 
-	i = np->rx.rsp_cons;
+	i = queue->rx.rsp_cons;
 	work_done = 0;
 	while ((i != rp) && (work_done < budget)) {
-		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
+		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
 		memset(extras, 0, sizeof(rinfo.extras));
 
-		err = xennet_get_responses(np, &rinfo, rp, &tmpq);
+		err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
 
 		if (unlikely(err)) {
 err:
 			while ((skb = __skb_dequeue(&tmpq)))
 				__skb_queue_tail(&errq, skb);
 			dev->stats.rx_errors++;
-			i = np->rx.rsp_cons;
+			i = queue->rx.rsp_cons;
 			continue;
 		}
 
@@ -978,7 +1038,7 @@ err:
 
 			if (unlikely(xennet_set_skb_gso(skb, gso))) {
 				__skb_queue_head(&tmpq, skb);
-				np->rx.rsp_cons += skb_queue_len(&tmpq);
+				queue->rx.rsp_cons += skb_queue_len(&tmpq);
 				goto err;
 			}
 		}
@@ -992,7 +1052,7 @@ err:
 		skb->data_len = rx->status;
 		skb->len += rx->status;
 
-		i = xennet_fill_frags(np, skb, &tmpq);
+		i = xennet_fill_frags(queue, skb, &tmpq);
 
 		if (rx->flags & XEN_NETRXF_csum_blank)
 			skb->ip_summed = CHECKSUM_PARTIAL;
@@ -1001,22 +1061,22 @@ err:
 
 		__skb_queue_tail(&rxq, skb);
 
-		np->rx.rsp_cons = ++i;
+		queue->rx.rsp_cons = ++i;
 		work_done++;
 	}
 
 	__skb_queue_purge(&errq);
 
-	work_done -= handle_incoming_queue(dev, &rxq);
+	work_done -= handle_incoming_queue(queue, &rxq);
 
 	/* If we get a callback with very few responses, reduce fill target. */
 	/* NB. Note exponential increase, linear decrease. */
-	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
-	     ((3*np->rx_target) / 4)) &&
-	    (--np->rx_target < np->rx_min_target))
-		np->rx_target = np->rx_min_target;
+	if (((queue->rx.req_prod_pvt - queue->rx.sring->rsp_prod) >
+	     ((3*queue->rx_target) / 4)) &&
+	    (--queue->rx_target < queue->rx_min_target))
+		queue->rx_target = queue->rx_min_target;
 
-	xennet_alloc_rx_buffers(dev);
+	xennet_alloc_rx_buffers(queue);
 
 	if (work_done < budget) {
 		int more_to_do = 0;
@@ -1025,14 +1085,14 @@ err:
 
 		local_irq_save(flags);
 
-		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
+		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
 		if (!more_to_do)
 			__napi_complete(napi);
 
 		local_irq_restore(flags);
 	}
 
-	spin_unlock(&np->rx_lock);
+	spin_unlock(&queue->rx_lock);
 
 	return work_done;
 }
@@ -1080,43 +1140,43 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
 	return tot;
 }
 
-static void xennet_release_tx_bufs(struct netfront_info *np)
+static void xennet_release_tx_bufs(struct netfront_queue *queue)
 {
 	struct sk_buff *skb;
 	int i;
 
 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
 		/* Skip over entries which are actually freelist references */
-		if (skb_entry_is_link(&np->tx_skbs[i]))
+		if (skb_entry_is_link(&queue->tx_skbs[i]))
 			continue;
 
-		skb = np->tx_skbs[i].skb;
-		get_page(np->grant_tx_page[i]);
-		gnttab_end_foreign_access(np->grant_tx_ref[i],
+		skb = queue->tx_skbs[i].skb;
+		get_page(queue->grant_tx_page[i]);
+		gnttab_end_foreign_access(queue->grant_tx_ref[i],
 					  GNTMAP_readonly,
-					  (unsigned long)page_address(np->grant_tx_page[i]));
-		np->grant_tx_page[i] = NULL;
-		np->grant_tx_ref[i] = GRANT_INVALID_REF;
-		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
+					  (unsigned long)page_address(queue->grant_tx_page[i]));
+		queue->grant_tx_page[i] = NULL;
+		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
+		add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
 		dev_kfree_skb_irq(skb);
 	}
 }
 
-static void xennet_release_rx_bufs(struct netfront_info *np)
+static void xennet_release_rx_bufs(struct netfront_queue *queue)
 {
 	int id, ref;
 
-	spin_lock_bh(&np->rx_lock);
+	spin_lock_bh(&queue->rx_lock);
 
 	for (id = 0; id < NET_RX_RING_SIZE; id++) {
 		struct sk_buff *skb;
 		struct page *page;
 
-		skb = np->rx_skbs[id];
+		skb = queue->rx_skbs[id];
 		if (!skb)
 			continue;
 
-		ref = np->grant_rx_ref[id];
+		ref = queue->grant_rx_ref[id];
 		if (ref == GRANT_INVALID_REF)
 			continue;
 
@@ -1128,21 +1188,28 @@ static void xennet_release_rx_bufs(struct netfront_info *np)
 		get_page(page);
 		gnttab_end_foreign_access(ref, 0,
 					  (unsigned long)page_address(page));
-		np->grant_rx_ref[id] = GRANT_INVALID_REF;
+		queue->grant_rx_ref[id] = GRANT_INVALID_REF;
 
 		kfree_skb(skb);
 	}
 
-	spin_unlock_bh(&np->rx_lock);
+	spin_unlock_bh(&queue->rx_lock);
 }
 
 static void xennet_uninit(struct net_device *dev)
 {
 	struct netfront_info *np = netdev_priv(dev);
-	xennet_release_tx_bufs(np);
-	xennet_release_rx_bufs(np);
-	gnttab_free_grant_references(np->gref_tx_head);
-	gnttab_free_grant_references(np->gref_rx_head);
+	unsigned int num_queues = dev->real_num_tx_queues;
+	struct netfront_queue *queue;
+	unsigned int i;
+
+	for (i = 0; i < num_queues; ++i) {
+		queue = &np->queues[i];
+		xennet_release_tx_bufs(queue);
+		xennet_release_rx_bufs(queue);
+		gnttab_free_grant_references(queue->gref_tx_head);
+		gnttab_free_grant_references(queue->gref_rx_head);
+	}
 }
 
 static netdev_features_t xennet_fix_features(struct net_device *dev,
@@ -1203,25 +1270,24 @@ static int xennet_set_features(struct net_device *dev,
 
 static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
 {
-	struct netfront_info *np = dev_id;
-	struct net_device *dev = np->netdev;
+	struct netfront_queue *queue = dev_id;
 	unsigned long flags;
 
-	spin_lock_irqsave(&np->tx_lock, flags);
-	xennet_tx_buf_gc(dev);
-	spin_unlock_irqrestore(&np->tx_lock, flags);
+	spin_lock_irqsave(&queue->tx_lock, flags);
+	xennet_tx_buf_gc(queue);
+	spin_unlock_irqrestore(&queue->tx_lock, flags);
 
 	return IRQ_HANDLED;
 }
 
 static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
 {
-	struct netfront_info *np = dev_id;
-	struct net_device *dev = np->netdev;
+	struct netfront_queue *queue = dev_id;
+	struct net_device *dev = queue->info->netdev;
 
 	if (likely(netif_carrier_ok(dev) &&
-		   RING_HAS_UNCONSUMED_RESPONSES(&np->rx)))
-		napi_schedule(&np->napi);
+		   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
+		napi_schedule(&queue->napi);
 
 	return IRQ_HANDLED;
 }
@@ -1236,7 +1302,12 @@ static irqreturn_t xennet_interrupt(int irq, void *dev_id)
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void xennet_poll_controller(struct net_device *dev)
 {
-	xennet_interrupt(0, dev);
+	/* Poll each queue */
+	struct netfront_info *info = netdev_priv(dev);
+	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned int i;
+	for (i = 0; i < num_queues; ++i)
+		xennet_interrupt(0, &info->queues[i]);
 }
 #endif
 
@@ -1251,6 +1322,7 @@ static const struct net_device_ops xennet_netdev_ops = {
 	.ndo_validate_addr = eth_validate_addr,
 	.ndo_fix_features = xennet_fix_features,
 	.ndo_set_features = xennet_set_features,
+	.ndo_select_queue = xennet_select_queue,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = xennet_poll_controller,
 #endif
@@ -1258,66 +1330,30 @@ static const struct net_device_ops xennet_netdev_ops = {
 
 static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 {
-	int i, err;
+	int err;
 	struct net_device *netdev;
 	struct netfront_info *np;
 
-	netdev = alloc_etherdev(sizeof(struct netfront_info));
+	netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
 	if (!netdev)
 		return ERR_PTR(-ENOMEM);
 
 	np = netdev_priv(netdev);
 	np->xbdev = dev;
 
-	spin_lock_init(&np->tx_lock);
-	spin_lock_init(&np->rx_lock);
-
-	skb_queue_head_init(&np->rx_batch);
-	np->rx_target     = RX_DFL_MIN_TARGET;
-	np->rx_min_target = RX_DFL_MIN_TARGET;
-	np->rx_max_target = RX_MAX_TARGET;
-
-	init_timer(&np->rx_refill_timer);
-	np->rx_refill_timer.data = (unsigned long)netdev;
-	np->rx_refill_timer.function = rx_refill_timeout;
+	/* No need to use rtnl_lock() before the call below as it
+	 * happens before register_netdev().
+	 */
+	netif_set_real_num_tx_queues(netdev, 0);
+	np->queues = NULL;
 
 	err = -ENOMEM;
 	np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
 	if (np->stats == NULL)
 		goto exit;
 
-	/* Initialise tx_skbs as a free chain containing every entry. */
-	np->tx_skb_freelist = 0;
-	for (i = 0; i < NET_TX_RING_SIZE; i++) {
-		skb_entry_set_link(&np->tx_skbs[i], i+1);
-		np->grant_tx_ref[i] = GRANT_INVALID_REF;
-		np->grant_tx_page[i] = NULL;
-	}
-
-	/* Clear out rx_skbs */
-	for (i = 0; i < NET_RX_RING_SIZE; i++) {
-		np->rx_skbs[i] = NULL;
-		np->grant_rx_ref[i] = GRANT_INVALID_REF;
-	}
-
-	/* A grant for every tx ring slot */
-	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
-					  &np->gref_tx_head) < 0) {
-		pr_alert("can't alloc tx grant refs\n");
-		err = -ENOMEM;
-		goto exit_free_stats;
-	}
-	/* A grant for every rx ring slot */
-	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
-					  &np->gref_rx_head) < 0) {
-		pr_alert("can't alloc rx grant refs\n");
-		err = -ENOMEM;
-		goto exit_free_tx;
-	}
-
 	netdev->netdev_ops = &xennet_netdev_ops;
 
-	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
 	netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
 			   NETIF_F_GSO_ROBUST;
 	netdev->hw_features = NETIF_F_SG |
@@ -1332,7 +1368,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 	 */
 	netdev->features |= netdev->hw_features;
 
-	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
+	netdev->ethtool_ops = &xennet_ethtool_ops;
 	SET_NETDEV_DEV(netdev, &dev->dev);
 
 	netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
@@ -1343,10 +1379,6 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 
 	return netdev;
 
- exit_free_tx:
-	gnttab_free_grant_references(np->gref_tx_head);
- exit_free_stats:
-	free_percpu(np->stats);
  exit:
 	free_netdev(netdev);
 	return ERR_PTR(err);
@@ -1404,30 +1436,37 @@ static void xennet_end_access(int ref, void *page)
 
 static void xennet_disconnect_backend(struct netfront_info *info)
 {
-	/* Stop old i/f to prevent errors whilst we rebuild the state. */
-	spin_lock_bh(&info->rx_lock);
-	spin_lock_irq(&info->tx_lock);
-	netif_carrier_off(info->netdev);
-	spin_unlock_irq(&info->tx_lock);
-	spin_unlock_bh(&info->rx_lock);
-
-	if (info->tx_irq && (info->tx_irq == info->rx_irq))
-		unbind_from_irqhandler(info->tx_irq, info);
-	if (info->tx_irq && (info->tx_irq != info->rx_irq)) {
-		unbind_from_irqhandler(info->tx_irq, info);
-		unbind_from_irqhandler(info->rx_irq, info);
-	}
-	info->tx_evtchn = info->rx_evtchn = 0;
-	info->tx_irq = info->rx_irq = 0;
+	unsigned int i = 0;
+	unsigned int num_queues = info->netdev->real_num_tx_queues;
+
+	for (i = 0; i < num_queues; ++i) {
+		struct netfront_queue *queue = &info->queues[i];
+
+		/* Stop old i/f to prevent errors whilst we rebuild the state. */
+		spin_lock_bh(&queue->rx_lock);
+		spin_lock_irq(&queue->tx_lock);
+		netif_carrier_off(queue->info->netdev);
+		spin_unlock_irq(&queue->tx_lock);
+		spin_unlock_bh(&queue->rx_lock);
+
+		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
+			unbind_from_irqhandler(queue->tx_irq, queue);
+		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
+			unbind_from_irqhandler(queue->tx_irq, queue);
+			unbind_from_irqhandler(queue->rx_irq, queue);
+		}
+		queue->tx_evtchn = queue->rx_evtchn = 0;
+		queue->tx_irq = queue->rx_irq = 0;
 
-	/* End access and free the pages */
-	xennet_end_access(info->tx_ring_ref, info->tx.sring);
-	xennet_end_access(info->rx_ring_ref, info->rx.sring);
+		/* End access and free the pages */
+		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
+		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
 
-	info->tx_ring_ref = GRANT_INVALID_REF;
-	info->rx_ring_ref = GRANT_INVALID_REF;
-	info->tx.sring = NULL;
-	info->rx.sring = NULL;
+		queue->tx_ring_ref = GRANT_INVALID_REF;
+		queue->rx_ring_ref = GRANT_INVALID_REF;
+		queue->tx.sring = NULL;
+		queue->rx.sring = NULL;
+	}
 }
 
 /**
@@ -1468,100 +1507,86 @@ static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
 	return 0;
 }
 
-static int setup_netfront_single(struct netfront_info *info)
+static int setup_netfront_single(struct netfront_queue *queue)
 {
 	int err;
 
-	err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
+	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
 	if (err < 0)
 		goto fail;
 
-	err = bind_evtchn_to_irqhandler(info->tx_evtchn,
+	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
 					xennet_interrupt,
-					0, info->netdev->name, info);
+					0, queue->info->netdev->name, queue);
 	if (err < 0)
 		goto bind_fail;
-	info->rx_evtchn = info->tx_evtchn;
-	info->rx_irq = info->tx_irq = err;
+	queue->rx_evtchn = queue->tx_evtchn;
+	queue->rx_irq = queue->tx_irq = err;
 
 	return 0;
 
 bind_fail:
-	xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
-	info->tx_evtchn = 0;
+	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
+	queue->tx_evtchn = 0;
 fail:
 	return err;
 }
 
-static int setup_netfront_split(struct netfront_info *info)
+static int setup_netfront_split(struct netfront_queue *queue)
 {
 	int err;
 
-	err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
+	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
 	if (err < 0)
 		goto fail;
-	err = xenbus_alloc_evtchn(info->xbdev, &info->rx_evtchn);
+	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
 	if (err < 0)
 		goto alloc_rx_evtchn_fail;
 
-	snprintf(info->tx_irq_name, sizeof(info->tx_irq_name),
-		 "%s-tx", info->netdev->name);
-	err = bind_evtchn_to_irqhandler(info->tx_evtchn,
+	snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
+		 "%s-tx", queue->name);
+	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
 					xennet_tx_interrupt,
-					0, info->tx_irq_name, info);
+					0, queue->tx_irq_name, queue);
 	if (err < 0)
 		goto bind_tx_fail;
-	info->tx_irq = err;
+	queue->tx_irq = err;
 
-	snprintf(info->rx_irq_name, sizeof(info->rx_irq_name),
-		 "%s-rx", info->netdev->name);
-	err = bind_evtchn_to_irqhandler(info->rx_evtchn,
+	snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
+		 "%s-rx", queue->name);
+	err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
 					xennet_rx_interrupt,
-					0, info->rx_irq_name, info);
+					0, queue->rx_irq_name, queue);
 	if (err < 0)
 		goto bind_rx_fail;
-	info->rx_irq = err;
+	queue->rx_irq = err;
 
 	return 0;
 
 bind_rx_fail:
-	unbind_from_irqhandler(info->tx_irq, info);
-	info->tx_irq = 0;
+	unbind_from_irqhandler(queue->tx_irq, queue);
+	queue->tx_irq = 0;
 bind_tx_fail:
-	xenbus_free_evtchn(info->xbdev, info->rx_evtchn);
-	info->rx_evtchn = 0;
+	xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
+	queue->rx_evtchn = 0;
 alloc_rx_evtchn_fail:
-	xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
-	info->tx_evtchn = 0;
+	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
+	queue->tx_evtchn = 0;
 fail:
 	return err;
 }
 
-static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
+static int setup_netfront(struct xenbus_device *dev,
+			struct netfront_queue *queue, unsigned int feature_split_evtchn)
 {
 	struct xen_netif_tx_sring *txs;
 	struct xen_netif_rx_sring *rxs;
 	int err;
-	struct net_device *netdev = info->netdev;
-	unsigned int feature_split_evtchn;
-
-	info->tx_ring_ref = GRANT_INVALID_REF;
-	info->rx_ring_ref = GRANT_INVALID_REF;
-	info->rx.sring = NULL;
-	info->tx.sring = NULL;
-	netdev->irq = 0;
-
-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "feature-split-event-channels", "%u",
-			   &feature_split_evtchn);
-	if (err < 0)
-		feature_split_evtchn = 0;
 
-	err = xen_net_read_mac(dev, netdev->dev_addr);
-	if (err) {
-		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
-		goto fail;
-	}
+	queue->tx_ring_ref = GRANT_INVALID_REF;
+	queue->rx_ring_ref = GRANT_INVALID_REF;
+	queue->rx.sring = NULL;
+	queue->tx.sring = NULL;
 
 	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
 	if (!txs) {
@@ -1570,13 +1595,13 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
1570 goto fail; 1595 goto fail;
1571 } 1596 }
1572 SHARED_RING_INIT(txs); 1597 SHARED_RING_INIT(txs);
1573 FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); 1598 FRONT_RING_INIT(&queue->tx, txs, PAGE_SIZE);
1574 1599
1575 err = xenbus_grant_ring(dev, virt_to_mfn(txs)); 1600 err = xenbus_grant_ring(dev, virt_to_mfn(txs));
1576 if (err < 0) 1601 if (err < 0)
1577 goto grant_tx_ring_fail; 1602 goto grant_tx_ring_fail;
1603 queue->tx_ring_ref = err;
1578 1604
1579 info->tx_ring_ref = err;
1580 rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); 1605 rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1581 if (!rxs) { 1606 if (!rxs) {
1582 err = -ENOMEM; 1607 err = -ENOMEM;
@@ -1584,21 +1609,21 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
1584 goto alloc_rx_ring_fail; 1609 goto alloc_rx_ring_fail;
1585 } 1610 }
1586 SHARED_RING_INIT(rxs); 1611 SHARED_RING_INIT(rxs);
1587 FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); 1612 FRONT_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
1588 1613
1589 err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); 1614 err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
1590 if (err < 0) 1615 if (err < 0)
1591 goto grant_rx_ring_fail; 1616 goto grant_rx_ring_fail;
1592 info->rx_ring_ref = err; 1617 queue->rx_ring_ref = err;
1593 1618
1594 if (feature_split_evtchn) 1619 if (feature_split_evtchn)
1595 err = setup_netfront_split(info); 1620 err = setup_netfront_split(queue);
1596 /* setup single event channel if 1621 /* setup single event channel if
1597 * a) feature-split-event-channels == 0 1622 * a) feature-split-event-channels == 0
1598 * b) feature-split-event-channels == 1 but failed to setup 1623 * b) feature-split-event-channels == 1 but failed to setup
1599 */ 1624 */
1600 if (!feature_split_evtchn || (feature_split_evtchn && err)) 1625 if (!feature_split_evtchn || (feature_split_evtchn && err))
1601 err = setup_netfront_single(info); 1626 err = setup_netfront_single(queue);
1602 1627
1603 if (err) 1628 if (err)
1604 goto alloc_evtchn_fail; 1629 goto alloc_evtchn_fail;
@@ -1609,17 +1634,225 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
1609 * granted pages because backend is not accessing it at this point. 1634 * granted pages because backend is not accessing it at this point.
1610 */ 1635 */
1611alloc_evtchn_fail: 1636alloc_evtchn_fail:
1612 gnttab_end_foreign_access_ref(info->rx_ring_ref, 0); 1637 gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
1613grant_rx_ring_fail: 1638grant_rx_ring_fail:
1614 free_page((unsigned long)rxs); 1639 free_page((unsigned long)rxs);
1615alloc_rx_ring_fail: 1640alloc_rx_ring_fail:
1616 gnttab_end_foreign_access_ref(info->tx_ring_ref, 0); 1641 gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
1617grant_tx_ring_fail: 1642grant_tx_ring_fail:
1618 free_page((unsigned long)txs); 1643 free_page((unsigned long)txs);
1619fail: 1644fail:
1620 return err; 1645 return err;
1621} 1646}
1622 1647
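setup_netfront() gives each ring its own zeroed page, initialises both halves of the ring, grants the page to the backend, and unwinds in reverse order on failure. The ring macros come from xen/interface/io/ring.h; roughly (sketched here for reference, not part of this patch):

    /* Shared half: producer/consumer indices start at 0, event thresholds at 1. */
    #define SHARED_RING_INIT(_s) do {                                \
            (_s)->req_prod  = (_s)->rsp_prod  = 0;                   \
            (_s)->req_event = (_s)->rsp_event = 1;                   \
    } while (0)

    /* Front half: private producer index, consumer index and ring size. */
    #define FRONT_RING_INIT(_r, _s, __size) do {                     \
            (_r)->req_prod_pvt = 0;                                  \
            (_r)->rsp_cons = 0;                                      \
            (_r)->nr_ents = __RING_SIZE(_s, __size);                 \
            (_r)->sring = (_s);                                      \
    } while (0)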
1648/* Queue-specific initialisation
1649 * This used to be done in xennet_create_dev() but must now
1650 * be run per-queue.
1651 */
1652static int xennet_init_queue(struct netfront_queue *queue)
1653{
1654 unsigned short i;
1655 int err = 0;
1656
1657 spin_lock_init(&queue->tx_lock);
1658 spin_lock_init(&queue->rx_lock);
1659
1660 skb_queue_head_init(&queue->rx_batch);
1661 queue->rx_target = RX_DFL_MIN_TARGET;
1662 queue->rx_min_target = RX_DFL_MIN_TARGET;
1663 queue->rx_max_target = RX_MAX_TARGET;
1664
1665 init_timer(&queue->rx_refill_timer);
1666 queue->rx_refill_timer.data = (unsigned long)queue;
1667 queue->rx_refill_timer.function = rx_refill_timeout;
1668
1669 snprintf(queue->name, sizeof(queue->name), "%s-q%u",
1670 queue->info->netdev->name, queue->id);
1671
1672 /* Initialise tx_skbs as a free chain containing every entry. */
1673 queue->tx_skb_freelist = 0;
1674 for (i = 0; i < NET_TX_RING_SIZE; i++) {
1675 skb_entry_set_link(&queue->tx_skbs[i], i+1);
1676 queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1677 queue->grant_tx_page[i] = NULL;
1678 }
1679
1680 /* Clear out rx_skbs */
1681 for (i = 0; i < NET_RX_RING_SIZE; i++) {
1682 queue->rx_skbs[i] = NULL;
1683 queue->grant_rx_ref[i] = GRANT_INVALID_REF;
1684 }
1685
1686 /* A grant for every tx ring slot */
1687 if (gnttab_alloc_grant_references(TX_MAX_TARGET,
1688 &queue->gref_tx_head) < 0) {
1689 pr_alert("can't alloc tx grant refs\n");
1690 err = -ENOMEM;
1691 goto exit;
1692 }
1693
1694 /* A grant for every rx ring slot */
1695 if (gnttab_alloc_grant_references(RX_MAX_TARGET,
1696 &queue->gref_rx_head) < 0) {
1697 pr_alert("can't alloc rx grant refs\n");
1698 err = -ENOMEM;
1699 goto exit_free_tx;
1700 }
1701
1702 return 0;
1703
1704 exit_free_tx:
1705 gnttab_free_grant_references(queue->gref_tx_head);
1706 exit:
1707 return err;
1708}
1709
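xennet_init_queue() links every tx_skbs slot into a free chain so that slot IDs can be handed out and reclaimed in O(1) as packets are queued and completed. A sketch of the freelist helpers it relies on, assumed to match the single-queue helpers already present earlier in the file:

    static void skb_entry_set_link(union skb_entry *list, unsigned short id)
    {
            list->link = id;
    }

    static unsigned short get_id_from_freelist(unsigned *head,
                                               union skb_entry *list)
    {
            unsigned int id = *head;

            *head = list[id].link;                 /* pop the chain head */
            return id;
    }

    static void add_id_to_freelist(unsigned *head,
                                   union skb_entry *list, unsigned short id)
    {
            skb_entry_set_link(&list[id], *head);  /* push id back on the chain */
            *head = id;
    }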
1710static int write_queue_xenstore_keys(struct netfront_queue *queue,
1711 struct xenbus_transaction *xbt, int write_hierarchical)
1712{
1713 /* Write the queue-specific keys into XenStore in the traditional
 1714 * way for a single queue, or in per-queue subkeys for multiple
1715 * queues.
1716 */
1717 struct xenbus_device *dev = queue->info->xbdev;
1718 int err;
1719 const char *message;
1720 char *path;
1721 size_t pathsize;
1722
1723 /* Choose the correct place to write the keys */
1724 if (write_hierarchical) {
1725 pathsize = strlen(dev->nodename) + 10;
1726 path = kzalloc(pathsize, GFP_KERNEL);
1727 if (!path) {
1728 err = -ENOMEM;
1729 message = "out of memory while writing ring references";
1730 goto error;
1731 }
1732 snprintf(path, pathsize, "%s/queue-%u",
1733 dev->nodename, queue->id);
1734 } else {
1735 path = (char *)dev->nodename;
1736 }
1737
1738 /* Write ring references */
1739 err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
1740 queue->tx_ring_ref);
1741 if (err) {
1742 message = "writing tx-ring-ref";
1743 goto error;
1744 }
1745
1746 err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
1747 queue->rx_ring_ref);
1748 if (err) {
1749 message = "writing rx-ring-ref";
1750 goto error;
1751 }
1752
1753 /* Write event channels; taking into account both shared
1754 * and split event channel scenarios.
1755 */
1756 if (queue->tx_evtchn == queue->rx_evtchn) {
1757 /* Shared event channel */
1758 err = xenbus_printf(*xbt, path,
1759 "event-channel", "%u", queue->tx_evtchn);
1760 if (err) {
1761 message = "writing event-channel";
1762 goto error;
1763 }
1764 } else {
1765 /* Split event channels */
1766 err = xenbus_printf(*xbt, path,
1767 "event-channel-tx", "%u", queue->tx_evtchn);
1768 if (err) {
1769 message = "writing event-channel-tx";
1770 goto error;
1771 }
1772
1773 err = xenbus_printf(*xbt, path,
1774 "event-channel-rx", "%u", queue->rx_evtchn);
1775 if (err) {
1776 message = "writing event-channel-rx";
1777 goto error;
1778 }
1779 }
1780
1781 if (write_hierarchical)
1782 kfree(path);
1783 return 0;
1784
1785error:
1786 if (write_hierarchical)
1787 kfree(path);
1788 xenbus_dev_fatal(dev, err, "%s", message);
1789 return err;
1790}
1791
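write_queue_xenstore_keys() keeps the pre-existing flat layout when there is a single queue, so older backends continue to work, and switches to per-queue subdirectories otherwise. Assuming a frontend node of device/vif/0 (the actual nodename depends on the domain configuration), the two layouts would look roughly like:

    device/vif/0/tx-ring-ref                      (single queue, flat)
    device/vif/0/rx-ring-ref
    device/vif/0/event-channel                    (or event-channel-tx / -rx)

    device/vif/0/multi-queue-num-queues = 2       (two queues, hierarchical)
    device/vif/0/queue-0/tx-ring-ref
    device/vif/0/queue-0/rx-ring-ref
    device/vif/0/queue-0/event-channel-tx
    device/vif/0/queue-0/event-channel-rx
    device/vif/0/queue-1/tx-ring-ref
    ...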
1792static void xennet_destroy_queues(struct netfront_info *info)
1793{
1794 unsigned int i;
1795
1796 rtnl_lock();
1797
1798 for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
1799 struct netfront_queue *queue = &info->queues[i];
1800
1801 if (netif_running(info->netdev))
1802 napi_disable(&queue->napi);
1803 netif_napi_del(&queue->napi);
1804 }
1805
1806 rtnl_unlock();
1807
1808 kfree(info->queues);
1809 info->queues = NULL;
1810}
1811
1812static int xennet_create_queues(struct netfront_info *info,
1813 unsigned int num_queues)
1814{
1815 unsigned int i;
1816 int ret;
1817
1818 info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
1819 GFP_KERNEL);
1820 if (!info->queues)
1821 return -ENOMEM;
1822
1823 rtnl_lock();
1824
1825 for (i = 0; i < num_queues; i++) {
1826 struct netfront_queue *queue = &info->queues[i];
1827
1828 queue->id = i;
1829 queue->info = info;
1830
1831 ret = xennet_init_queue(queue);
1832 if (ret < 0) {
1833 dev_warn(&info->netdev->dev, "only created %d queues\n",
1834 num_queues);
1835 num_queues = i;
1836 break;
1837 }
1838
1839 netif_napi_add(queue->info->netdev, &queue->napi,
1840 xennet_poll, 64);
1841 if (netif_running(info->netdev))
1842 napi_enable(&queue->napi);
1843 }
1844
1845 netif_set_real_num_tx_queues(info->netdev, num_queues);
1846
1847 rtnl_unlock();
1848
1849 if (num_queues == 0) {
1850 dev_err(&info->netdev->dev, "no queues\n");
1851 return -EINVAL;
1852 }
1853 return 0;
1854}
1855
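xennet_create_queues() can only raise real_num_tx_queues up to the number of TX queues the net_device was allocated with, so the allocation elsewhere in this patch (not shown in this hunk) presumably sizes the device against the module parameter, along the lines of:

    /* Sketch of the allocation assumed in xennet_create_dev(): reserve room
     * for the maximum number of queues the max_queues parameter permits. */
    netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
    if (!netdev)
            return ERR_PTR(-ENOMEM);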
1623/* Common code used when first setting up, and when resuming. */ 1856/* Common code used when first setting up, and when resuming. */
1624static int talk_to_netback(struct xenbus_device *dev, 1857static int talk_to_netback(struct xenbus_device *dev,
1625 struct netfront_info *info) 1858 struct netfront_info *info)
@@ -1627,11 +1860,61 @@ static int talk_to_netback(struct xenbus_device *dev,
1627 const char *message; 1860 const char *message;
1628 struct xenbus_transaction xbt; 1861 struct xenbus_transaction xbt;
1629 int err; 1862 int err;
1863 unsigned int feature_split_evtchn;
1864 unsigned int i = 0;
1865 unsigned int max_queues = 0;
1866 struct netfront_queue *queue = NULL;
1867 unsigned int num_queues = 1;
1630 1868
1631 /* Create shared ring, alloc event channel. */ 1869 info->netdev->irq = 0;
1632 err = setup_netfront(dev, info); 1870
1633 if (err) 1871 /* Check if backend supports multiple queues */
1872 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1873 "multi-queue-max-queues", "%u", &max_queues);
1874 if (err < 0)
1875 max_queues = 1;
1876 num_queues = min(max_queues, xennet_max_queues);
1877
1878 /* Check feature-split-event-channels */
1879 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1880 "feature-split-event-channels", "%u",
1881 &feature_split_evtchn);
1882 if (err < 0)
1883 feature_split_evtchn = 0;
1884
1885 /* Read mac addr. */
1886 err = xen_net_read_mac(dev, info->netdev->dev_addr);
1887 if (err) {
1888 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
1634 goto out; 1889 goto out;
1890 }
1891
1892 if (info->queues)
1893 xennet_destroy_queues(info);
1894
1895 err = xennet_create_queues(info, num_queues);
1896 if (err < 0)
1897 goto destroy_ring;
1898
1899 /* Create shared ring, alloc event channel -- for each queue */
1900 for (i = 0; i < num_queues; ++i) {
1901 queue = &info->queues[i];
1902 err = setup_netfront(dev, queue, feature_split_evtchn);
1903 if (err) {
1904 /* setup_netfront() will tidy up the current
1905 * queue on error, but we need to clean up
1906 * those already allocated.
1907 */
1908 if (i > 0) {
1909 rtnl_lock();
1910 netif_set_real_num_tx_queues(info->netdev, i);
1911 rtnl_unlock();
1912 goto destroy_ring;
1913 } else {
1914 goto out;
1915 }
1916 }
1917 }
1635 1918
1636again: 1919again:
1637 err = xenbus_transaction_start(&xbt); 1920 err = xenbus_transaction_start(&xbt);
@@ -1640,41 +1923,29 @@ again:
1640 goto destroy_ring; 1923 goto destroy_ring;
1641 } 1924 }
1642 1925
1643 err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u", 1926 if (num_queues == 1) {
1644 info->tx_ring_ref); 1927 err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
1645 if (err) { 1928 if (err)
1646 message = "writing tx ring-ref"; 1929 goto abort_transaction_no_dev_fatal;
1647 goto abort_transaction;
1648 }
1649 err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
1650 info->rx_ring_ref);
1651 if (err) {
1652 message = "writing rx ring-ref";
1653 goto abort_transaction;
1654 }
1655
1656 if (info->tx_evtchn == info->rx_evtchn) {
1657 err = xenbus_printf(xbt, dev->nodename,
1658 "event-channel", "%u", info->tx_evtchn);
1659 if (err) {
1660 message = "writing event-channel";
1661 goto abort_transaction;
1662 }
1663 } else { 1930 } else {
1664 err = xenbus_printf(xbt, dev->nodename, 1931 /* Write the number of queues */
1665 "event-channel-tx", "%u", info->tx_evtchn); 1932 err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues",
1933 "%u", num_queues);
1666 if (err) { 1934 if (err) {
1667 message = "writing event-channel-tx"; 1935 message = "writing multi-queue-num-queues";
1668 goto abort_transaction; 1936 goto abort_transaction_no_dev_fatal;
1669 } 1937 }
1670 err = xenbus_printf(xbt, dev->nodename, 1938
1671 "event-channel-rx", "%u", info->rx_evtchn); 1939 /* Write the keys for each queue */
1672 if (err) { 1940 for (i = 0; i < num_queues; ++i) {
1673 message = "writing event-channel-rx"; 1941 queue = &info->queues[i];
1674 goto abort_transaction; 1942 err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
1943 if (err)
1944 goto abort_transaction_no_dev_fatal;
1675 } 1945 }
1676 } 1946 }
1677 1947
1948 /* The remaining keys are not queue-specific */
1678 err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 1949 err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
1679 1); 1950 1);
1680 if (err) { 1951 if (err) {
@@ -1724,10 +1995,16 @@ again:
1724 return 0; 1995 return 0;
1725 1996
1726 abort_transaction: 1997 abort_transaction:
1727 xenbus_transaction_end(xbt, 1);
1728 xenbus_dev_fatal(dev, err, "%s", message); 1998 xenbus_dev_fatal(dev, err, "%s", message);
1999abort_transaction_no_dev_fatal:
2000 xenbus_transaction_end(xbt, 1);
1729 destroy_ring: 2001 destroy_ring:
1730 xennet_disconnect_backend(info); 2002 xennet_disconnect_backend(info);
2003 kfree(info->queues);
2004 info->queues = NULL;
2005 rtnl_lock();
2006 netif_set_real_num_tx_queues(info->netdev, 0);
 2007 rtnl_unlock();
1731 out: 2008 out:
1732 return err; 2009 return err;
1733} 2010}
@@ -1735,11 +2012,14 @@ again:
1735static int xennet_connect(struct net_device *dev) 2012static int xennet_connect(struct net_device *dev)
1736{ 2013{
1737 struct netfront_info *np = netdev_priv(dev); 2014 struct netfront_info *np = netdev_priv(dev);
2015 unsigned int num_queues = 0;
1738 int i, requeue_idx, err; 2016 int i, requeue_idx, err;
1739 struct sk_buff *skb; 2017 struct sk_buff *skb;
1740 grant_ref_t ref; 2018 grant_ref_t ref;
1741 struct xen_netif_rx_request *req; 2019 struct xen_netif_rx_request *req;
1742 unsigned int feature_rx_copy; 2020 unsigned int feature_rx_copy;
2021 unsigned int j = 0;
2022 struct netfront_queue *queue = NULL;
1743 2023
1744 err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, 2024 err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1745 "feature-rx-copy", "%u", &feature_rx_copy); 2025 "feature-rx-copy", "%u", &feature_rx_copy);
@@ -1756,40 +2036,47 @@ static int xennet_connect(struct net_device *dev)
1756 if (err) 2036 if (err)
1757 return err; 2037 return err;
1758 2038
2039 /* talk_to_netback() sets the correct number of queues */
2040 num_queues = dev->real_num_tx_queues;
2041
1759 rtnl_lock(); 2042 rtnl_lock();
1760 netdev_update_features(dev); 2043 netdev_update_features(dev);
1761 rtnl_unlock(); 2044 rtnl_unlock();
1762 2045
1763 spin_lock_bh(&np->rx_lock); 2046 /* By now, the queue structures have been set up */
1764 spin_lock_irq(&np->tx_lock); 2047 for (j = 0; j < num_queues; ++j) {
2048 queue = &np->queues[j];
2049 spin_lock_bh(&queue->rx_lock);
2050 spin_lock_irq(&queue->tx_lock);
1765 2051
1766 /* Step 1: Discard all pending TX packet fragments. */ 2052 /* Step 1: Discard all pending TX packet fragments. */
1767 xennet_release_tx_bufs(np); 2053 xennet_release_tx_bufs(queue);
1768 2054
1769 /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ 2055 /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
1770 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { 2056 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
1771 skb_frag_t *frag; 2057 skb_frag_t *frag;
1772 const struct page *page; 2058 const struct page *page;
1773 if (!np->rx_skbs[i]) 2059 if (!queue->rx_skbs[i])
1774 continue; 2060 continue;
1775 2061
1776 skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i); 2062 skb = queue->rx_skbs[requeue_idx] = xennet_get_rx_skb(queue, i);
1777 ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); 2063 ref = queue->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(queue, i);
1778 req = RING_GET_REQUEST(&np->rx, requeue_idx); 2064 req = RING_GET_REQUEST(&queue->rx, requeue_idx);
1779 2065
1780 frag = &skb_shinfo(skb)->frags[0]; 2066 frag = &skb_shinfo(skb)->frags[0];
1781 page = skb_frag_page(frag); 2067 page = skb_frag_page(frag);
1782 gnttab_grant_foreign_access_ref( 2068 gnttab_grant_foreign_access_ref(
1783 ref, np->xbdev->otherend_id, 2069 ref, queue->info->xbdev->otherend_id,
1784 pfn_to_mfn(page_to_pfn(page)), 2070 pfn_to_mfn(page_to_pfn(page)),
1785 0); 2071 0);
1786 req->gref = ref; 2072 req->gref = ref;
1787 req->id = requeue_idx; 2073 req->id = requeue_idx;
1788 2074
1789 requeue_idx++; 2075 requeue_idx++;
1790 } 2076 }
1791 2077
1792 np->rx.req_prod_pvt = requeue_idx; 2078 queue->rx.req_prod_pvt = requeue_idx;
2079 }
1793 2080
1794 /* 2081 /*
1795 * Step 3: All public and private state should now be sane. Get 2082 * Step 3: All public and private state should now be sane. Get
@@ -1798,14 +2085,17 @@ static int xennet_connect(struct net_device *dev)
1798 * packets. 2085 * packets.
1799 */ 2086 */
1800 netif_carrier_on(np->netdev); 2087 netif_carrier_on(np->netdev);
1801 notify_remote_via_irq(np->tx_irq); 2088 for (j = 0; j < num_queues; ++j) {
1802 if (np->tx_irq != np->rx_irq) 2089 queue = &np->queues[j];
1803 notify_remote_via_irq(np->rx_irq); 2090 notify_remote_via_irq(queue->tx_irq);
1804 xennet_tx_buf_gc(dev); 2091 if (queue->tx_irq != queue->rx_irq)
1805 xennet_alloc_rx_buffers(dev); 2092 notify_remote_via_irq(queue->rx_irq);
1806 2093 xennet_tx_buf_gc(queue);
1807 spin_unlock_irq(&np->tx_lock); 2094 xennet_alloc_rx_buffers(queue);
1808 spin_unlock_bh(&np->rx_lock); 2095
2096 spin_unlock_irq(&queue->tx_lock);
2097 spin_unlock_bh(&queue->rx_lock);
2098 }
1809 2099
1810 return 0; 2100 return 0;
1811} 2101}
@@ -1878,7 +2168,7 @@ static void xennet_get_ethtool_stats(struct net_device *dev,
1878 int i; 2168 int i;
1879 2169
1880 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) 2170 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1881 data[i] = *(unsigned long *)(np + xennet_stats[i].offset); 2171 data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
1882} 2172}
1883 2173
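The cast to atomic_t * works because the ethtool statistics table stores byte offsets into struct netfront_info and rx_gso_checksum_fixup is now an atomic_t rather than an unsigned long. The descriptor table, declared earlier in the file, is assumed to look roughly like:

    static const struct xennet_stat {
            char name[ETH_GSTRING_LEN];
            u16 offset;
    } xennet_stats[] = {
            {
                    "rx_gso_checksum_fixup",
                    offsetof(struct netfront_info, rx_gso_checksum_fixup)
            },
    };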
1884static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data) 2174static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
@@ -1909,8 +2199,12 @@ static ssize_t show_rxbuf_min(struct device *dev,
1909{ 2199{
1910 struct net_device *netdev = to_net_dev(dev); 2200 struct net_device *netdev = to_net_dev(dev);
1911 struct netfront_info *info = netdev_priv(netdev); 2201 struct netfront_info *info = netdev_priv(netdev);
2202 unsigned int num_queues = netdev->real_num_tx_queues;
1912 2203
1913 return sprintf(buf, "%u\n", info->rx_min_target); 2204 if (num_queues)
2205 return sprintf(buf, "%u\n", info->queues[0].rx_min_target);
2206 else
2207 return sprintf(buf, "%u\n", RX_MIN_TARGET);
1914} 2208}
1915 2209
1916static ssize_t store_rxbuf_min(struct device *dev, 2210static ssize_t store_rxbuf_min(struct device *dev,
@@ -1919,8 +2213,11 @@ static ssize_t store_rxbuf_min(struct device *dev,
1919{ 2213{
1920 struct net_device *netdev = to_net_dev(dev); 2214 struct net_device *netdev = to_net_dev(dev);
1921 struct netfront_info *np = netdev_priv(netdev); 2215 struct netfront_info *np = netdev_priv(netdev);
2216 unsigned int num_queues = netdev->real_num_tx_queues;
1922 char *endp; 2217 char *endp;
1923 unsigned long target; 2218 unsigned long target;
2219 unsigned int i;
2220 struct netfront_queue *queue;
1924 2221
1925 if (!capable(CAP_NET_ADMIN)) 2222 if (!capable(CAP_NET_ADMIN))
1926 return -EPERM; 2223 return -EPERM;
@@ -1934,16 +2231,19 @@ static ssize_t store_rxbuf_min(struct device *dev,
1934 if (target > RX_MAX_TARGET) 2231 if (target > RX_MAX_TARGET)
1935 target = RX_MAX_TARGET; 2232 target = RX_MAX_TARGET;
1936 2233
1937 spin_lock_bh(&np->rx_lock); 2234 for (i = 0; i < num_queues; ++i) {
1938 if (target > np->rx_max_target) 2235 queue = &np->queues[i];
1939 np->rx_max_target = target; 2236 spin_lock_bh(&queue->rx_lock);
1940 np->rx_min_target = target; 2237 if (target > queue->rx_max_target)
1941 if (target > np->rx_target) 2238 queue->rx_max_target = target;
1942 np->rx_target = target; 2239 queue->rx_min_target = target;
2240 if (target > queue->rx_target)
2241 queue->rx_target = target;
1943 2242
1944 xennet_alloc_rx_buffers(netdev); 2243 xennet_alloc_rx_buffers(queue);
1945 2244
1946 spin_unlock_bh(&np->rx_lock); 2245 spin_unlock_bh(&queue->rx_lock);
2246 }
1947 return len; 2247 return len;
1948} 2248}
1949 2249
@@ -1952,8 +2252,12 @@ static ssize_t show_rxbuf_max(struct device *dev,
1952{ 2252{
1953 struct net_device *netdev = to_net_dev(dev); 2253 struct net_device *netdev = to_net_dev(dev);
1954 struct netfront_info *info = netdev_priv(netdev); 2254 struct netfront_info *info = netdev_priv(netdev);
2255 unsigned int num_queues = netdev->real_num_tx_queues;
1955 2256
1956 return sprintf(buf, "%u\n", info->rx_max_target); 2257 if (num_queues)
2258 return sprintf(buf, "%u\n", info->queues[0].rx_max_target);
2259 else
2260 return sprintf(buf, "%u\n", RX_MAX_TARGET);
1957} 2261}
1958 2262
1959static ssize_t store_rxbuf_max(struct device *dev, 2263static ssize_t store_rxbuf_max(struct device *dev,
@@ -1962,8 +2266,11 @@ static ssize_t store_rxbuf_max(struct device *dev,
1962{ 2266{
1963 struct net_device *netdev = to_net_dev(dev); 2267 struct net_device *netdev = to_net_dev(dev);
1964 struct netfront_info *np = netdev_priv(netdev); 2268 struct netfront_info *np = netdev_priv(netdev);
2269 unsigned int num_queues = netdev->real_num_tx_queues;
1965 char *endp; 2270 char *endp;
1966 unsigned long target; 2271 unsigned long target;
2272 unsigned int i = 0;
2273 struct netfront_queue *queue = NULL;
1967 2274
1968 if (!capable(CAP_NET_ADMIN)) 2275 if (!capable(CAP_NET_ADMIN))
1969 return -EPERM; 2276 return -EPERM;
@@ -1977,16 +2284,19 @@ static ssize_t store_rxbuf_max(struct device *dev,
1977 if (target > RX_MAX_TARGET) 2284 if (target > RX_MAX_TARGET)
1978 target = RX_MAX_TARGET; 2285 target = RX_MAX_TARGET;
1979 2286
1980 spin_lock_bh(&np->rx_lock); 2287 for (i = 0; i < num_queues; ++i) {
1981 if (target < np->rx_min_target) 2288 queue = &np->queues[i];
1982 np->rx_min_target = target; 2289 spin_lock_bh(&queue->rx_lock);
1983 np->rx_max_target = target; 2290 if (target < queue->rx_min_target)
1984 if (target < np->rx_target) 2291 queue->rx_min_target = target;
1985 np->rx_target = target; 2292 queue->rx_max_target = target;
2293 if (target < queue->rx_target)
2294 queue->rx_target = target;
1986 2295
1987 xennet_alloc_rx_buffers(netdev); 2296 xennet_alloc_rx_buffers(queue);
1988 2297
1989 spin_unlock_bh(&np->rx_lock); 2298 spin_unlock_bh(&queue->rx_lock);
2299 }
1990 return len; 2300 return len;
1991} 2301}
1992 2302
@@ -1995,8 +2305,12 @@ static ssize_t show_rxbuf_cur(struct device *dev,
1995{ 2305{
1996 struct net_device *netdev = to_net_dev(dev); 2306 struct net_device *netdev = to_net_dev(dev);
1997 struct netfront_info *info = netdev_priv(netdev); 2307 struct netfront_info *info = netdev_priv(netdev);
2308 unsigned int num_queues = netdev->real_num_tx_queues;
1998 2309
1999 return sprintf(buf, "%u\n", info->rx_target); 2310 if (num_queues)
2311 return sprintf(buf, "%u\n", info->queues[0].rx_target);
2312 else
2313 return sprintf(buf, "0\n");
2000} 2314}
2001 2315
2002static struct device_attribute xennet_attrs[] = { 2316static struct device_attribute xennet_attrs[] = {
@@ -2043,6 +2357,9 @@ static const struct xenbus_device_id netfront_ids[] = {
2043static int xennet_remove(struct xenbus_device *dev) 2357static int xennet_remove(struct xenbus_device *dev)
2044{ 2358{
2045 struct netfront_info *info = dev_get_drvdata(&dev->dev); 2359 struct netfront_info *info = dev_get_drvdata(&dev->dev);
2360 unsigned int num_queues = info->netdev->real_num_tx_queues;
2361 struct netfront_queue *queue = NULL;
2362 unsigned int i = 0;
2046 2363
2047 dev_dbg(&dev->dev, "%s\n", dev->nodename); 2364 dev_dbg(&dev->dev, "%s\n", dev->nodename);
2048 2365
@@ -2052,7 +2369,15 @@ static int xennet_remove(struct xenbus_device *dev)
2052 2369
2053 unregister_netdev(info->netdev); 2370 unregister_netdev(info->netdev);
2054 2371
2055 del_timer_sync(&info->rx_refill_timer); 2372 for (i = 0; i < num_queues; ++i) {
2373 queue = &info->queues[i];
2374 del_timer_sync(&queue->rx_refill_timer);
2375 }
2376
2377 if (num_queues) {
2378 kfree(info->queues);
2379 info->queues = NULL;
2380 }
2056 2381
2057 free_percpu(info->stats); 2382 free_percpu(info->stats);
2058 2383
@@ -2078,6 +2403,9 @@ static int __init netif_init(void)
2078 2403
2079 pr_info("Initialising Xen virtual ethernet driver\n"); 2404 pr_info("Initialising Xen virtual ethernet driver\n");
2080 2405
2406 /* Allow as many queues as there are CPUs, by default */
2407 xennet_max_queues = num_online_cpus();
2408
2081 return xenbus_register_frontend(&netfront_driver); 2409 return xenbus_register_frontend(&netfront_driver);
2082} 2410}
2083module_init(netif_init); 2411module_init(netif_init);