diff options
Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r-- | drivers/net/virtio_net.c | 85 |
1 file changed, 51 insertions, 34 deletions
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 01f4eb5c8b78..7bab4de658a9 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -36,7 +36,10 @@ module_param(csum, bool, 0444); | |||
36 | module_param(gso, bool, 0444); | 36 | module_param(gso, bool, 0444); |
37 | 37 | ||
38 | /* FIXME: MTU in config. */ | 38 | /* FIXME: MTU in config. */ |
39 | #define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) | 39 | #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) |
40 | #define MERGE_BUFFER_LEN (ALIGN(GOOD_PACKET_LEN + \ | ||
41 | sizeof(struct virtio_net_hdr_mrg_rxbuf), \ | ||
42 | L1_CACHE_BYTES)) | ||
40 | #define GOOD_COPY_LEN 128 | 43 | #define GOOD_COPY_LEN 128 |
41 | 44 | ||
42 | #define VIRTNET_DRIVER_VERSION "1.0.0" | 45 | #define VIRTNET_DRIVER_VERSION "1.0.0" |
@@ -314,10 +317,10 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb) | |||
314 | head_skb->dev->stats.rx_length_errors++; | 317 | head_skb->dev->stats.rx_length_errors++; |
315 | return -EINVAL; | 318 | return -EINVAL; |
316 | } | 319 | } |
317 | if (unlikely(len > MAX_PACKET_LEN)) { | 320 | if (unlikely(len > MERGE_BUFFER_LEN)) { |
318 | pr_debug("%s: rx error: merge buffer too long\n", | 321 | pr_debug("%s: rx error: merge buffer too long\n", |
319 | head_skb->dev->name); | 322 | head_skb->dev->name); |
320 | len = MAX_PACKET_LEN; | 323 | len = MERGE_BUFFER_LEN; |
321 | } | 324 | } |
322 | if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { | 325 | if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { |
323 | struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); | 326 | struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); |
@@ -336,18 +339,17 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb) | |||
336 | if (curr_skb != head_skb) { | 339 | if (curr_skb != head_skb) { |
337 | head_skb->data_len += len; | 340 | head_skb->data_len += len; |
338 | head_skb->len += len; | 341 | head_skb->len += len; |
339 | head_skb->truesize += MAX_PACKET_LEN; | 342 | head_skb->truesize += MERGE_BUFFER_LEN; |
340 | } | 343 | } |
341 | page = virt_to_head_page(buf); | 344 | page = virt_to_head_page(buf); |
342 | offset = buf - (char *)page_address(page); | 345 | offset = buf - (char *)page_address(page); |
343 | if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { | 346 | if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { |
344 | put_page(page); | 347 | put_page(page); |
345 | skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, | 348 | skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, |
346 | len, MAX_PACKET_LEN); | 349 | len, MERGE_BUFFER_LEN); |
347 | } else { | 350 | } else { |
348 | skb_add_rx_frag(curr_skb, num_skb_frags, page, | 351 | skb_add_rx_frag(curr_skb, num_skb_frags, page, |
349 | offset, len, | 352 | offset, len, MERGE_BUFFER_LEN); |
350 | MAX_PACKET_LEN); | ||
351 | } | 353 | } |
352 | --rq->num; | 354 | --rq->num; |
353 | } | 355 | } |
@@ -383,7 +385,7 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) | |||
383 | struct page *page = virt_to_head_page(buf); | 385 | struct page *page = virt_to_head_page(buf); |
384 | skb = page_to_skb(rq, page, | 386 | skb = page_to_skb(rq, page, |
385 | (char *)buf - (char *)page_address(page), | 387 | (char *)buf - (char *)page_address(page), |
386 | len, MAX_PACKET_LEN); | 388 | len, MERGE_BUFFER_LEN); |
387 | if (unlikely(!skb)) { | 389 | if (unlikely(!skb)) { |
388 | dev->stats.rx_dropped++; | 390 | dev->stats.rx_dropped++; |
389 | put_page(page); | 391 | put_page(page); |
@@ -471,11 +473,11 @@ static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp) | |||
471 | struct skb_vnet_hdr *hdr; | 473 | struct skb_vnet_hdr *hdr; |
472 | int err; | 474 | int err; |
473 | 475 | ||
474 | skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp); | 476 | skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp); |
475 | if (unlikely(!skb)) | 477 | if (unlikely(!skb)) |
476 | return -ENOMEM; | 478 | return -ENOMEM; |
477 | 479 | ||
478 | skb_put(skb, MAX_PACKET_LEN); | 480 | skb_put(skb, GOOD_PACKET_LEN); |
479 | 481 | ||
480 | hdr = skb_vnet_hdr(skb); | 482 | hdr = skb_vnet_hdr(skb); |
481 | sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr); | 483 | sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr); |
@@ -542,20 +544,20 @@ static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) | |||
542 | int err; | 544 | int err; |
543 | 545 | ||
544 | if (gfp & __GFP_WAIT) { | 546 | if (gfp & __GFP_WAIT) { |
545 | if (skb_page_frag_refill(MAX_PACKET_LEN, &vi->alloc_frag, | 547 | if (skb_page_frag_refill(MERGE_BUFFER_LEN, &vi->alloc_frag, |
546 | gfp)) { | 548 | gfp)) { |
547 | buf = (char *)page_address(vi->alloc_frag.page) + | 549 | buf = (char *)page_address(vi->alloc_frag.page) + |
548 | vi->alloc_frag.offset; | 550 | vi->alloc_frag.offset; |
549 | get_page(vi->alloc_frag.page); | 551 | get_page(vi->alloc_frag.page); |
550 | vi->alloc_frag.offset += MAX_PACKET_LEN; | 552 | vi->alloc_frag.offset += MERGE_BUFFER_LEN; |
551 | } | 553 | } |
552 | } else { | 554 | } else { |
553 | buf = netdev_alloc_frag(MAX_PACKET_LEN); | 555 | buf = netdev_alloc_frag(MERGE_BUFFER_LEN); |
554 | } | 556 | } |
555 | if (!buf) | 557 | if (!buf) |
556 | return -ENOMEM; | 558 | return -ENOMEM; |
557 | 559 | ||
558 | sg_init_one(rq->sg, buf, MAX_PACKET_LEN); | 560 | sg_init_one(rq->sg, buf, MERGE_BUFFER_LEN); |
559 | err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp); | 561 | err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp); |
560 | if (err < 0) | 562 | if (err < 0) |
561 | put_page(virt_to_head_page(buf)); | 563 | put_page(virt_to_head_page(buf)); |
@@ -591,7 +593,8 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp) | |||
591 | } while (rq->vq->num_free); | 593 | } while (rq->vq->num_free); |
592 | if (unlikely(rq->num > rq->max)) | 594 | if (unlikely(rq->num > rq->max)) |
593 | rq->max = rq->num; | 595 | rq->max = rq->num; |
594 | virtqueue_kick(rq->vq); | 596 | if (unlikely(!virtqueue_kick(rq->vq))) |
597 | return false; | ||
595 | return !oom; | 598 | return !oom; |
596 | } | 599 | } |
597 | 600 | ||
@@ -797,7 +800,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
797 | err = xmit_skb(sq, skb); | 800 | err = xmit_skb(sq, skb); |
798 | 801 | ||
799 | /* This should not happen! */ | 802 | /* This should not happen! */ |
800 | if (unlikely(err)) { | 803 | if (unlikely(err) || unlikely(!virtqueue_kick(sq->vq))) { |
801 | dev->stats.tx_fifo_errors++; | 804 | dev->stats.tx_fifo_errors++; |
802 | if (net_ratelimit()) | 805 | if (net_ratelimit()) |
803 | dev_warn(&dev->dev, | 806 | dev_warn(&dev->dev, |
@@ -806,7 +809,6 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
806 | kfree_skb(skb); | 809 | kfree_skb(skb); |
807 | return NETDEV_TX_OK; | 810 | return NETDEV_TX_OK; |
808 | } | 811 | } |
809 | virtqueue_kick(sq->vq); | ||
810 | 812 | ||
811 | /* Don't wait up for transmitted skbs to be freed. */ | 813 | /* Don't wait up for transmitted skbs to be freed. */ |
812 | skb_orphan(skb); | 814 | skb_orphan(skb); |
@@ -865,12 +867,14 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, | |||
865 | BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC) | 867 | BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC) |
866 | < 0); | 868 | < 0); |
867 | 869 | ||
868 | virtqueue_kick(vi->cvq); | 870 | if (unlikely(!virtqueue_kick(vi->cvq))) |
871 | return status == VIRTIO_NET_OK; | ||
869 | 872 | ||
870 | /* Spin for a response, the kick causes an ioport write, trapping | 873 | /* Spin for a response, the kick causes an ioport write, trapping |
871 | * into the hypervisor, so the request should be handled immediately. | 874 | * into the hypervisor, so the request should be handled immediately. |
872 | */ | 875 | */ |
873 | while (!virtqueue_get_buf(vi->cvq, &tmp)) | 876 | while (!virtqueue_get_buf(vi->cvq, &tmp) && |
877 | !virtqueue_is_broken(vi->cvq)) | ||
874 | cpu_relax(); | 878 | cpu_relax(); |
875 | 879 | ||
876 | return status == VIRTIO_NET_OK; | 880 | return status == VIRTIO_NET_OK; |
@@ -898,8 +902,13 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p) | |||
898 | return -EINVAL; | 902 | return -EINVAL; |
899 | } | 903 | } |
900 | } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) { | 904 | } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) { |
901 | vdev->config->set(vdev, offsetof(struct virtio_net_config, mac), | 905 | unsigned int i; |
902 | addr->sa_data, dev->addr_len); | 906 | |
907 | /* Naturally, this has an atomicity problem. */ | ||
908 | for (i = 0; i < dev->addr_len; i++) | ||
909 | virtio_cwrite8(vdev, | ||
910 | offsetof(struct virtio_net_config, mac) + | ||
911 | i, addr->sa_data[i]); | ||
903 | } | 912 | } |
904 | 913 | ||
905 | eth_commit_mac_addr_change(dev, p); | 914 | eth_commit_mac_addr_change(dev, p); |
@@ -1281,9 +1290,8 @@ static void virtnet_config_changed_work(struct work_struct *work) | |||
1281 | if (!vi->config_enable) | 1290 | if (!vi->config_enable) |
1282 | goto done; | 1291 | goto done; |
1283 | 1292 | ||
1284 | if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS, | 1293 | if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, |
1285 | offsetof(struct virtio_net_config, status), | 1294 | struct virtio_net_config, status, &v) < 0) |
1286 | &v) < 0) | ||
1287 | goto done; | 1295 | goto done; |
1288 | 1296 | ||
1289 | if (v & VIRTIO_NET_S_ANNOUNCE) { | 1297 | if (v & VIRTIO_NET_S_ANNOUNCE) { |
@@ -1507,9 +1515,9 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
1507 | u16 max_queue_pairs; | 1515 | u16 max_queue_pairs; |
1508 | 1516 | ||
1509 | /* Find if host supports multiqueue virtio_net device */ | 1517 | /* Find if host supports multiqueue virtio_net device */ |
1510 | err = virtio_config_val(vdev, VIRTIO_NET_F_MQ, | 1518 | err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, |
1511 | offsetof(struct virtio_net_config, | 1519 | struct virtio_net_config, |
1512 | max_virtqueue_pairs), &max_queue_pairs); | 1520 | max_virtqueue_pairs, &max_queue_pairs); |
1513 | 1521 | ||
1514 | /* We need at least 2 queue's */ | 1522 | /* We need at least 2 queue's */ |
1515 | if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || | 1523 | if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || |
@@ -1561,9 +1569,11 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
1561 | dev->vlan_features = dev->features; | 1569 | dev->vlan_features = dev->features; |
1562 | 1570 | ||
1563 | /* Configuration may specify what MAC to use. Otherwise random. */ | 1571 | /* Configuration may specify what MAC to use. Otherwise random. */ |
1564 | if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC, | 1572 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) |
1565 | offsetof(struct virtio_net_config, mac), | 1573 | virtio_cread_bytes(vdev, |
1566 | dev->dev_addr, dev->addr_len) < 0) | 1574 | offsetof(struct virtio_net_config, mac), |
1575 | dev->dev_addr, dev->addr_len); | ||
1576 | else | ||
1567 | eth_hw_addr_random(dev); | 1577 | eth_hw_addr_random(dev); |
1568 | 1578 | ||
1569 | /* Set up our device-specific information */ | 1579 | /* Set up our device-specific information */ |
@@ -1576,6 +1586,13 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
1576 | if (vi->stats == NULL) | 1586 | if (vi->stats == NULL) |
1577 | goto free; | 1587 | goto free; |
1578 | 1588 | ||
1589 | for_each_possible_cpu(i) { | ||
1590 | struct virtnet_stats *virtnet_stats; | ||
1591 | virtnet_stats = per_cpu_ptr(vi->stats, i); | ||
1592 | u64_stats_init(&virtnet_stats->tx_syncp); | ||
1593 | u64_stats_init(&virtnet_stats->rx_syncp); | ||
1594 | } | ||
1595 | |||
1579 | mutex_init(&vi->config_lock); | 1596 | mutex_init(&vi->config_lock); |
1580 | vi->config_enable = true; | 1597 | vi->config_enable = true; |
1581 | INIT_WORK(&vi->config_work, virtnet_config_changed_work); | 1598 | INIT_WORK(&vi->config_work, virtnet_config_changed_work); |
@@ -1604,8 +1621,8 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
1604 | if (err) | 1621 | if (err) |
1605 | goto free_stats; | 1622 | goto free_stats; |
1606 | 1623 | ||
1607 | netif_set_real_num_tx_queues(dev, 1); | 1624 | netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); |
1608 | netif_set_real_num_rx_queues(dev, 1); | 1625 | netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); |
1609 | 1626 | ||
1610 | err = register_netdev(dev); | 1627 | err = register_netdev(dev); |
1611 | if (err) { | 1628 | if (err) { |
@@ -1697,7 +1714,7 @@ static void virtnet_remove(struct virtio_device *vdev) | |||
1697 | free_netdev(vi->dev); | 1714 | free_netdev(vi->dev); |
1698 | } | 1715 | } |
1699 | 1716 | ||
1700 | #ifdef CONFIG_PM | 1717 | #ifdef CONFIG_PM_SLEEP |
1701 | static int virtnet_freeze(struct virtio_device *vdev) | 1718 | static int virtnet_freeze(struct virtio_device *vdev) |
1702 | { | 1719 | { |
1703 | struct virtnet_info *vi = vdev->priv; | 1720 | struct virtnet_info *vi = vdev->priv; |
@@ -1788,7 +1805,7 @@ static struct virtio_driver virtio_net_driver = { | |||
1788 | .probe = virtnet_probe, | 1805 | .probe = virtnet_probe, |
1789 | .remove = virtnet_remove, | 1806 | .remove = virtnet_remove, |
1790 | .config_changed = virtnet_config_changed, | 1807 | .config_changed = virtnet_config_changed, |
1791 | #ifdef CONFIG_PM | 1808 | #ifdef CONFIG_PM_SLEEP |
1792 | .freeze = virtnet_freeze, | 1809 | .freeze = virtnet_freeze, |
1793 | .restore = virtnet_restore, | 1810 | .restore = virtnet_restore, |
1794 | #endif | 1811 | #endif |