author      Jason Wang <jasowang@redhat.com>        2012-12-07 02:04:55 -0500
committer   David S. Miller <davem@davemloft.net>   2012-12-09 00:30:54 -0500
commit      e9d7417b97f420fa70e3e198f2603a0375fb80a7 (patch)
tree        4d7061e2fbf8339fa4c8fd3c14f5e569af3a10ae /drivers/net/virtio_net.c
parent      0afb1666fe4ed8e51083af544f00d31bc8753352 (diff)
virtio-net: separate fields of sending/receiving queue from virtnet_info
To support multiqueue transmitq/receiveq, the first step is to separate the
queue-related structures from virtnet_info. This patch introduces the
send_queue and receive_queue structures and passes pointers to them as the
parameters of the functions that handle sending/receiving.
Signed-off-by: Krishna Kumar <krkumar2@in.ibm.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r--   drivers/net/virtio_net.c   282
1 file changed, 158 insertions(+), 124 deletions(-)
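Before reading the diff, here is the shape of the change in miniature: per-direction state (virtqueue pointer, NAPI context, buffer counters, scatterlists) moves out of the flat virtnet_info into dedicated send_queue/receive_queue structs, and the queue handlers take a pointer to the queue rather than to the whole device. The snippet below is a standalone sketch of that split, not driver code: the kernel types (virtqueue, napi_struct) are stubbed as placeholders, and try_fill_recv is reduced to the num/max bookkeeping the real function performs.

#include <stdio.h>

/* Placeholder stand-ins for kernel types; sketch only. */
struct virtqueue { const char *name; };
struct napi_struct { int unused; };

/* Per-direction state, as introduced by the patch. */
struct send_queue {
        struct virtqueue *vq;           /* TX virtqueue */
};

struct receive_queue {
        struct virtqueue *vq;           /* RX virtqueue */
        struct napi_struct napi;        /* RX polling context */
        unsigned int num, max;          /* buffers posted / high-water mark */
};

/* Device-wide state now embeds one queue pair; only the control
 * virtqueue stays at the device level. */
struct virtnet_info {
        struct virtqueue *cvq;
        struct send_queue sq;
        struct receive_queue rq;
};

/* Handlers take the queue, not the device. */
static void try_fill_recv(struct receive_queue *rq)
{
        ++rq->num;
        if (rq->num > rq->max)
                rq->max = rq->num;
}

int main(void)
{
        struct virtqueue rx = { "rx" }, tx = { "tx" };
        struct virtnet_info vi = { .sq.vq = &tx, .rq.vq = &rx };

        try_fill_recv(&vi.rq);
        printf("%s: num=%u max=%u\n", vi.rq.vq->name, vi.rq.num, vi.rq.max);
        return 0;
}

With that picture in mind, the diff itself is a mechanical rename pass: every former vi->rvq/vi->svq access becomes rq->vq/sq->vq, and every helper gains the queue pointer as its first parameter.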
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 90ac97df2d02..02a71021565e 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -51,16 +51,40 @@ struct virtnet_stats {
 	u64 rx_packets;
 };
 
-struct virtnet_info {
-	struct virtio_device *vdev;
-	struct virtqueue *rvq, *svq, *cvq;
-	struct net_device *dev;
+/* Internal representation of a send virtqueue */
+struct send_queue {
+	/* Virtqueue associated with this send_queue */
+	struct virtqueue *vq;
+
+	/* TX: fragments + linear part + virtio header */
+	struct scatterlist sg[MAX_SKB_FRAGS + 2];
+};
+
+/* Internal representation of a receive virtqueue */
+struct receive_queue {
+	/* Virtqueue associated with this receive_queue */
+	struct virtqueue *vq;
+
 	struct napi_struct napi;
-	unsigned int status;
 
 	/* Number of input buffers, and max we've ever had. */
 	unsigned int num, max;
 
+	/* Chain pages by the private ptr. */
+	struct page *pages;
+
+	/* RX: fragments + linear part + virtio header */
+	struct scatterlist sg[MAX_SKB_FRAGS + 2];
+};
+
+struct virtnet_info {
+	struct virtio_device *vdev;
+	struct virtqueue *cvq;
+	struct net_device *dev;
+	struct send_queue sq;
+	struct receive_queue rq;
+	unsigned int status;
+
 	/* I like... big packets and I cannot lie! */
 	bool big_packets;
 
@@ -81,13 +105,6 @@ struct virtnet_info {
 
 	/* Lock for config space updates */
 	struct mutex config_lock;
-
-	/* Chain pages by the private ptr. */
-	struct page *pages;
-
-	/* fragments + linear part + virtio header */
-	struct scatterlist rx_sg[MAX_SKB_FRAGS + 2];
-	struct scatterlist tx_sg[MAX_SKB_FRAGS + 2];
 };
 
 struct skb_vnet_hdr {
@@ -117,22 +134,22 @@ static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
  * private is used to chain pages for big packets, put the whole
  * most recent used list in the beginning for reuse
  */
-static void give_pages(struct virtnet_info *vi, struct page *page)
+static void give_pages(struct receive_queue *rq, struct page *page)
 {
 	struct page *end;
 
-	/* Find end of list, sew whole thing into vi->pages. */
+	/* Find end of list, sew whole thing into vi->rq.pages. */
 	for (end = page; end->private; end = (struct page *)end->private);
-	end->private = (unsigned long)vi->pages;
-	vi->pages = page;
+	end->private = (unsigned long)rq->pages;
+	rq->pages = page;
 }
 
-static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
+static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
 {
-	struct page *p = vi->pages;
+	struct page *p = rq->pages;
 
 	if (p) {
-		vi->pages = (struct page *)p->private;
+		rq->pages = (struct page *)p->private;
 		/* clear private here, it is used to chain pages */
 		p->private = 0;
 	} else
@@ -140,12 +157,12 @@ static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
 	return p;
 }
 
-static void skb_xmit_done(struct virtqueue *svq)
+static void skb_xmit_done(struct virtqueue *vq)
 {
-	struct virtnet_info *vi = svq->vdev->priv;
+	struct virtnet_info *vi = vq->vdev->priv;
 
 	/* Suppress further interrupts. */
-	virtqueue_disable_cb(svq);
+	virtqueue_disable_cb(vq);
 
 	/* We were probably waiting for more output buffers. */
 	netif_wake_queue(vi->dev);
@@ -167,9 +184,10 @@ static void set_skb_frag(struct sk_buff *skb, struct page *page,
 }
 
 /* Called from bottom half context */
-static struct sk_buff *page_to_skb(struct virtnet_info *vi,
+static struct sk_buff *page_to_skb(struct receive_queue *rq,
 				   struct page *page, unsigned int len)
 {
+	struct virtnet_info *vi = rq->vq->vdev->priv;
 	struct sk_buff *skb;
 	struct skb_vnet_hdr *hdr;
 	unsigned int copy, hdr_len, offset;
@@ -224,12 +242,12 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 	}
 
 	if (page)
-		give_pages(vi, page);
+		give_pages(rq, page);
 
 	return skb;
 }
 
-static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
+static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb)
 {
 	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
 	struct page *page;
@@ -243,7 +261,7 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
 		skb->dev->stats.rx_length_errors++;
 		return -EINVAL;
 	}
-	page = virtqueue_get_buf(vi->rvq, &len);
+	page = virtqueue_get_buf(rq->vq, &len);
 	if (!page) {
 		pr_debug("%s: rx error: %d buffers missing\n",
 			 skb->dev->name, hdr->mhdr.num_buffers);
@@ -256,14 +274,15 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
 
 		set_skb_frag(skb, page, 0, &len);
 
-		--vi->num;
+		--rq->num;
 	}
 	return 0;
 }
 
-static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
+static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
 {
-	struct virtnet_info *vi = netdev_priv(dev);
+	struct virtnet_info *vi = rq->vq->vdev->priv;
+	struct net_device *dev = vi->dev;
 	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
 	struct sk_buff *skb;
 	struct page *page;
@@ -273,7 +292,7 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
 		pr_debug("%s: short packet %i\n", dev->name, len);
 		dev->stats.rx_length_errors++;
 		if (vi->mergeable_rx_bufs || vi->big_packets)
-			give_pages(vi, buf);
+			give_pages(rq, buf);
 		else
 			dev_kfree_skb(buf);
 		return;
@@ -285,14 +304,14 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
 		skb_trim(skb, len);
 	} else {
 		page = buf;
-		skb = page_to_skb(vi, page, len);
+		skb = page_to_skb(rq, page, len);
 		if (unlikely(!skb)) {
 			dev->stats.rx_dropped++;
-			give_pages(vi, page);
+			give_pages(rq, page);
 			return;
 		}
 		if (vi->mergeable_rx_bufs)
-			if (receive_mergeable(vi, skb)) {
+			if (receive_mergeable(rq, skb)) {
 				dev_kfree_skb(skb);
 				return;
 			}
@@ -359,8 +378,9 @@ frame_err:
 	dev_kfree_skb(skb);
 }
 
-static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
+static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
 {
+	struct virtnet_info *vi = rq->vq->vdev->priv;
 	struct sk_buff *skb;
 	struct skb_vnet_hdr *hdr;
 	int err;
@@ -372,77 +392,77 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
 	skb_put(skb, MAX_PACKET_LEN);
 
 	hdr = skb_vnet_hdr(skb);
-	sg_set_buf(vi->rx_sg, &hdr->hdr, sizeof hdr->hdr);
+	sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);
 
-	skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);
+	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
 
-	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
+	err = virtqueue_add_buf(rq->vq, rq->sg, 0, 2, skb, gfp);
 	if (err < 0)
 		dev_kfree_skb(skb);
 
 	return err;
 }
 
-static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
+static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
 {
 	struct page *first, *list = NULL;
 	char *p;
 	int i, err, offset;
 
-	/* page in vi->rx_sg[MAX_SKB_FRAGS + 1] is list tail */
+	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
 	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
-		first = get_a_page(vi, gfp);
+		first = get_a_page(rq, gfp);
 		if (!first) {
 			if (list)
-				give_pages(vi, list);
+				give_pages(rq, list);
 			return -ENOMEM;
 		}
-		sg_set_buf(&vi->rx_sg[i], page_address(first), PAGE_SIZE);
+		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
 
 		/* chain new page in list head to match sg */
 		first->private = (unsigned long)list;
 		list = first;
 	}
 
-	first = get_a_page(vi, gfp);
+	first = get_a_page(rq, gfp);
 	if (!first) {
-		give_pages(vi, list);
+		give_pages(rq, list);
 		return -ENOMEM;
 	}
 	p = page_address(first);
 
-	/* vi->rx_sg[0], vi->rx_sg[1] share the same page */
-	/* a separated vi->rx_sg[0] for virtio_net_hdr only due to QEMU bug */
-	sg_set_buf(&vi->rx_sg[0], p, sizeof(struct virtio_net_hdr));
+	/* rq->sg[0], rq->sg[1] share the same page */
+	/* a separated rq->sg[0] for virtio_net_hdr only due to QEMU bug */
+	sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr));
 
-	/* vi->rx_sg[1] for data packet, from offset */
+	/* rq->sg[1] for data packet, from offset */
 	offset = sizeof(struct padded_vnet_hdr);
-	sg_set_buf(&vi->rx_sg[1], p + offset, PAGE_SIZE - offset);
+	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
 
 	/* chain first in list head */
 	first->private = (unsigned long)list;
-	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
+	err = virtqueue_add_buf(rq->vq, rq->sg, 0, MAX_SKB_FRAGS + 2,
 				first, gfp);
 	if (err < 0)
-		give_pages(vi, first);
+		give_pages(rq, first);
 
 	return err;
 }
 
-static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
+static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
 {
 	struct page *page;
 	int err;
 
-	page = get_a_page(vi, gfp);
+	page = get_a_page(rq, gfp);
 	if (!page)
 		return -ENOMEM;
 
-	sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);
+	sg_init_one(rq->sg, page_address(page), PAGE_SIZE);
 
-	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page, gfp);
+	err = virtqueue_add_buf(rq->vq, rq->sg, 0, 1, page, gfp);
 	if (err < 0)
-		give_pages(vi, page);
 
	return err;
 }
@@ -454,65 +474,68 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
  * before we're receiving packets, or from refill_work which is
  * careful to disable receiving (using napi_disable).
  */
-static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
+static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
 {
+	struct virtnet_info *vi = rq->vq->vdev->priv;
 	int err;
 	bool oom;
 
 	do {
 		if (vi->mergeable_rx_bufs)
-			err = add_recvbuf_mergeable(vi, gfp);
+			err = add_recvbuf_mergeable(rq, gfp);
 		else if (vi->big_packets)
-			err = add_recvbuf_big(vi, gfp);
+			err = add_recvbuf_big(rq, gfp);
 		else
-			err = add_recvbuf_small(vi, gfp);
+			err = add_recvbuf_small(rq, gfp);
 
 		oom = err == -ENOMEM;
 		if (err < 0)
 			break;
-		++vi->num;
+		++rq->num;
 	} while (err > 0);
-	if (unlikely(vi->num > vi->max))
-		vi->max = vi->num;
-	virtqueue_kick(vi->rvq);
+	if (unlikely(rq->num > rq->max))
+		rq->max = rq->num;
+	virtqueue_kick(rq->vq);
 	return !oom;
 }
 
 static void skb_recv_done(struct virtqueue *rvq)
 {
 	struct virtnet_info *vi = rvq->vdev->priv;
+	struct receive_queue *rq = &vi->rq;
+
 	/* Schedule NAPI, Suppress further interrupts if successful. */
-	if (napi_schedule_prep(&vi->napi)) {
+	if (napi_schedule_prep(&rq->napi)) {
 		virtqueue_disable_cb(rvq);
-		__napi_schedule(&vi->napi);
+		__napi_schedule(&rq->napi);
 	}
 }
 
-static void virtnet_napi_enable(struct virtnet_info *vi)
+static void virtnet_napi_enable(struct receive_queue *rq)
 {
-	napi_enable(&vi->napi);
+	napi_enable(&rq->napi);
 
 	/* If all buffers were filled by other side before we napi_enabled, we
 	 * won't get another interrupt, so process any outstanding packets
 	 * now. virtnet_poll wants re-enable the queue, so we disable here.
 	 * We synchronize against interrupts via NAPI_STATE_SCHED */
-	if (napi_schedule_prep(&vi->napi)) {
-		virtqueue_disable_cb(vi->rvq);
+	if (napi_schedule_prep(&rq->napi)) {
+		virtqueue_disable_cb(rq->vq);
 		local_bh_disable();
-		__napi_schedule(&vi->napi);
+		__napi_schedule(&rq->napi);
 		local_bh_enable();
 	}
 }
 
 static void refill_work(struct work_struct *work)
 {
-	struct virtnet_info *vi;
+	struct virtnet_info *vi =
+		container_of(work, struct virtnet_info, refill.work);
 	bool still_empty;
 
-	vi = container_of(work, struct virtnet_info, refill.work);
-	napi_disable(&vi->napi);
-	still_empty = !try_fill_recv(vi, GFP_KERNEL);
-	virtnet_napi_enable(vi);
+	napi_disable(&vi->rq.napi);
+	still_empty = !try_fill_recv(&vi->rq, GFP_KERNEL);
+	virtnet_napi_enable(&vi->rq);
 
 	/* In theory, this can happen: if we don't get any buffers in
 	 * we will *never* try to fill again. */
@@ -522,29 +545,31 @@ static void refill_work(struct work_struct *work)
 
 static int virtnet_poll(struct napi_struct *napi, int budget)
 {
-	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
+	struct receive_queue *rq =
+		container_of(napi, struct receive_queue, napi);
+	struct virtnet_info *vi = rq->vq->vdev->priv;
 	void *buf;
 	unsigned int len, received = 0;
 
 again:
 	while (received < budget &&
-	       (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
-		receive_buf(vi->dev, buf, len);
-		--vi->num;
+	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
+		receive_buf(rq, buf, len);
+		--rq->num;
 		received++;
 	}
 
-	if (vi->num < vi->max / 2) {
-		if (!try_fill_recv(vi, GFP_ATOMIC))
+	if (rq->num < rq->max / 2) {
+		if (!try_fill_recv(rq, GFP_ATOMIC))
 			schedule_delayed_work(&vi->refill, 0);
 	}
 
 	/* Out of packets? */
 	if (received < budget) {
 		napi_complete(napi);
-		if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
+		if (unlikely(!virtqueue_enable_cb(rq->vq)) &&
 		    napi_schedule_prep(napi)) {
-			virtqueue_disable_cb(vi->rvq);
+			virtqueue_disable_cb(rq->vq);
 			__napi_schedule(napi);
 			goto again;
 		}
@@ -553,13 +578,14 @@ again:
 	return received;
 }
 
-static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
+static unsigned int free_old_xmit_skbs(struct send_queue *sq)
 {
 	struct sk_buff *skb;
 	unsigned int len, tot_sgs = 0;
+	struct virtnet_info *vi = sq->vq->vdev->priv;
 	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
 
-	while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
+	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
 		pr_debug("Sent skb %p\n", skb);
 
 		u64_stats_update_begin(&stats->tx_syncp);
@@ -573,10 +599,11 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
 	return tot_sgs;
 }
 
-static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
+static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
 {
 	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
 	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
+	struct virtnet_info *vi = sq->vq->vdev->priv;
 
 	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
 
@@ -611,25 +638,26 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
 
 	/* Encode metadata header at front. */
 	if (vi->mergeable_rx_bufs)
-		sg_set_buf(vi->tx_sg, &hdr->mhdr, sizeof hdr->mhdr);
+		sg_set_buf(sq->sg, &hdr->mhdr, sizeof hdr->mhdr);
 	else
-		sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr);
+		sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr);
 
-	hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
-	return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
+	hdr->num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
+	return virtqueue_add_buf(sq->vq, sq->sg, hdr->num_sg,
 				 0, skb, GFP_ATOMIC);
 }
 
 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
+	struct send_queue *sq = &vi->sq;
 	int capacity;
 
 	/* Free up any pending old buffers before queueing new ones. */
-	free_old_xmit_skbs(vi);
+	free_old_xmit_skbs(sq);
 
 	/* Try to transmit */
-	capacity = xmit_skb(vi, skb);
+	capacity = xmit_skb(sq, skb);
 
 	/* This can happen with OOM and indirect buffers. */
 	if (unlikely(capacity < 0)) {
@@ -648,7 +676,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 		kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
-	virtqueue_kick(vi->svq);
+	virtqueue_kick(sq->vq);
 
 	/* Don't wait up for transmitted skbs to be freed. */
 	skb_orphan(skb);
@@ -658,12 +686,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * before it gets out of hand.  Naturally, this wastes entries. */
 	if (capacity < 2+MAX_SKB_FRAGS) {
 		netif_stop_queue(dev);
-		if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) {
+		if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
 			/* More just got used, free them then recheck. */
-			capacity += free_old_xmit_skbs(vi);
+			capacity += free_old_xmit_skbs(sq);
 			if (capacity >= 2+MAX_SKB_FRAGS) {
 				netif_start_queue(dev);
-				virtqueue_disable_cb(vi->svq);
+				virtqueue_disable_cb(sq->vq);
 			}
 		}
 	}
@@ -731,7 +759,7 @@ static void virtnet_netpoll(struct net_device *dev)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
 
-	napi_schedule(&vi->napi);
+	napi_schedule(&vi->rq.napi);
 }
 #endif
 
@@ -740,10 +768,10 @@ static int virtnet_open(struct net_device *dev)
 	struct virtnet_info *vi = netdev_priv(dev);
 
 	/* Make sure we have some buffers: if oom use wq. */
-	if (!try_fill_recv(vi, GFP_KERNEL))
+	if (!try_fill_recv(&vi->rq, GFP_KERNEL))
 		schedule_delayed_work(&vi->refill, 0);
 
-	virtnet_napi_enable(vi);
+	virtnet_napi_enable(&vi->rq);
 	return 0;
 }
 
@@ -808,7 +836,7 @@ static int virtnet_close(struct net_device *dev)
 
 	/* Make sure refill_work doesn't re-enable napi! */
 	cancel_delayed_work_sync(&vi->refill);
-	napi_disable(&vi->napi);
+	napi_disable(&vi->rq.napi);
 
 	return 0;
 }
@@ -920,11 +948,10 @@ static void virtnet_get_ringparam(struct net_device *dev,
 {
 	struct virtnet_info *vi = netdev_priv(dev);
 
-	ring->rx_max_pending = virtqueue_get_vring_size(vi->rvq);
-	ring->tx_max_pending = virtqueue_get_vring_size(vi->svq);
+	ring->rx_max_pending = virtqueue_get_vring_size(vi->rq.vq);
+	ring->tx_max_pending = virtqueue_get_vring_size(vi->sq.vq);
 	ring->rx_pending = ring->rx_max_pending;
 	ring->tx_pending = ring->tx_max_pending;
-
 }
 
 
@@ -1019,6 +1046,13 @@ static void virtnet_config_changed(struct virtio_device *vdev)
 	schedule_work(&vi->config_work);
 }
 
+static void virtnet_del_vqs(struct virtnet_info *vi)
+{
+	struct virtio_device *vdev = vi->vdev;
+
+	vdev->config->del_vqs(vdev);
+}
+
 static int init_vqs(struct virtnet_info *vi)
 {
 	struct virtqueue *vqs[3];
@@ -1034,8 +1068,8 @@ static int init_vqs(struct virtnet_info *vi)
 	if (err)
 		return err;
 
-	vi->rvq = vqs[0];
-	vi->svq = vqs[1];
+	vi->rq.vq = vqs[0];
+	vi->sq.vq = vqs[1];
 
 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
 		vi->cvq = vqs[2];
@@ -1099,11 +1133,11 @@ static int virtnet_probe(struct virtio_device *vdev)
 
 	/* Set up our device-specific information */
 	vi = netdev_priv(dev);
-	netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
+	netif_napi_add(dev, &vi->rq.napi, virtnet_poll, napi_weight);
 	vi->dev = dev;
 	vi->vdev = vdev;
 	vdev->priv = vi;
-	vi->pages = NULL;
+	vi->rq.pages = NULL;
 	vi->stats = alloc_percpu(struct virtnet_stats);
 	err = -ENOMEM;
 	if (vi->stats == NULL)
@@ -1113,8 +1147,8 @@ static int virtnet_probe(struct virtio_device *vdev)
 	mutex_init(&vi->config_lock);
 	vi->config_enable = true;
 	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
-	sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
-	sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));
+	sg_init_table(vi->rq.sg, ARRAY_SIZE(vi->rq.sg));
+	sg_init_table(vi->sq.sg, ARRAY_SIZE(vi->sq.sg));
 
 	/* If we can receive ANY GSO packets, we must allocate large ones. */
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
@@ -1136,10 +1170,10 @@ static int virtnet_probe(struct virtio_device *vdev)
 	}
 
 	/* Last of all, set up some receive buffers. */
-	try_fill_recv(vi, GFP_KERNEL);
+	try_fill_recv(&vi->rq, GFP_KERNEL);
 
 	/* If we didn't even get one input buffer, we're useless. */
-	if (vi->num == 0) {
+	if (vi->rq.num == 0) {
 		err = -ENOMEM;
 		goto unregister;
 	}
@@ -1160,7 +1194,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 unregister:
 	unregister_netdev(dev);
 free_vqs:
-	vdev->config->del_vqs(vdev);
+	virtnet_del_vqs(vi);
 free_stats:
 	free_percpu(vi->stats);
 free:
@@ -1172,22 +1206,22 @@ static void free_unused_bufs(struct virtnet_info *vi)
 {
 	void *buf;
 	while (1) {
-		buf = virtqueue_detach_unused_buf(vi->svq);
+		buf = virtqueue_detach_unused_buf(vi->sq.vq);
 		if (!buf)
 			break;
 		dev_kfree_skb(buf);
 	}
 	while (1) {
-		buf = virtqueue_detach_unused_buf(vi->rvq);
+		buf = virtqueue_detach_unused_buf(vi->rq.vq);
 		if (!buf)
 			break;
 		if (vi->mergeable_rx_bufs || vi->big_packets)
-			give_pages(vi, buf);
+			give_pages(&vi->rq, buf);
 		else
 			dev_kfree_skb(buf);
-		--vi->num;
+		--vi->rq.num;
 	}
-	BUG_ON(vi->num != 0);
+	BUG_ON(vi->rq.num != 0);
 }
 
 static void remove_vq_common(struct virtnet_info *vi)
@@ -1197,10 +1231,10 @@ static void remove_vq_common(struct virtnet_info *vi)
 	/* Free unused buffers in both send and recv, if any. */
 	free_unused_bufs(vi);
 
-	vi->vdev->config->del_vqs(vi->vdev);
+	virtnet_del_vqs(vi);
 
-	while (vi->pages)
-		__free_pages(get_a_page(vi, GFP_KERNEL), 0);
+	while (vi->rq.pages)
+		__free_pages(get_a_page(&vi->rq, GFP_KERNEL), 0);
 }
 
 static void virtnet_remove(struct virtio_device *vdev)
@@ -1236,7 +1270,7 @@ static int virtnet_freeze(struct virtio_device *vdev)
 	cancel_delayed_work_sync(&vi->refill);
 
 	if (netif_running(vi->dev))
-		napi_disable(&vi->napi);
+		napi_disable(&vi->rq.napi);
 
 	remove_vq_common(vi);
 
@@ -1255,11 +1289,11 @@ static int virtnet_restore(struct virtio_device *vdev)
 		return err;
 
 	if (netif_running(vi->dev))
-		virtnet_napi_enable(vi);
+		virtnet_napi_enable(&vi->rq);
 
 	netif_device_attach(vi->dev);
 
-	if (!try_fill_recv(vi, GFP_KERNEL))
+	if (!try_fill_recv(&vi->rq, GFP_KERNEL))
 		schedule_delayed_work(&vi->refill, 0);
 
 	mutex_lock(&vi->config_lock);
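A closing note on where this leads: the commit message names multiqueue as the goal, and once every handler receives a send_queue/receive_queue pointer rather than the device, going from one queue pair to N becomes mostly a data-layout change. The sketch below is hypothetical: this patch still has exactly one sq and one rq, and the array layout and the max_queue_pairs name are assumptions about the follow-up work, not code from this commit.

#include <stdlib.h>

struct virtqueue { int id; };
struct send_queue { struct virtqueue *vq; };
struct receive_queue { struct virtqueue *vq; unsigned int num, max; };

/* Hypothetical multiqueue layout that the split makes possible:
 * sq/rq become arrays, one entry per queue pair (sketch only). */
struct virtnet_info {
        struct send_queue *sq;
        struct receive_queue *rq;
        unsigned int max_queue_pairs;   /* assumed name */
};

static void try_fill_recv(struct receive_queue *rq)
{
        ++rq->num;      /* stand-in for the real refill logic */
}

int main(void)
{
        struct virtnet_info vi = { .max_queue_pairs = 4 };
        unsigned int i;

        vi.sq = calloc(vi.max_queue_pairs, sizeof(*vi.sq));
        vi.rq = calloc(vi.max_queue_pairs, sizeof(*vi.rq));
        if (!vi.sq || !vi.rq)
                return 1;

        /* The per-queue handlers from this patch are reused as-is,
         * simply indexed per queue pair. */
        for (i = 0; i < vi.max_queue_pairs; i++)
                try_fill_recv(&vi.rq[i]);

        free(vi.sq);
        free(vi.rq);
        return 0;
}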