author     Linus Torvalds <torvalds@linux-foundation.org>  2013-05-02 17:14:04 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-05-02 17:14:04 -0400
commit     736a2dd2571ac56b11ed95a7814d838d5311be04 (patch)
tree       de10d107025970c6e51d5b6faeba799ed4b9caae /drivers/net/virtio_net.c
parent     0b2e3b6bb4a415379f16e38fc92db42379be47a1 (diff)
parent     01d779a14ef800b74684d9692add4944df052461 (diff)
Merge tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux
Pull virtio & lguest updates from Rusty Russell:
 "Lots of virtio work which wasn't quite ready for last merge window.
  Plus I dived into lguest again, reworking the pagetable code so we can
  move the switcher page: our fixmaps sometimes take more than 2MB now..."

Ugh.  Annoying conflicts with the tcm_vhost -> vhost_scsi rename.
Hopefully correctly resolved.

* tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux: (57 commits)
  caif_virtio: Remove bouncing email addresses
  lguest: improve code readability in lg_cpu_start.
  virtio-net: fill only rx queues which are being used
  lguest: map Switcher below fixmap.
  lguest: cache last cpu we ran on.
  lguest: map Switcher text whenever we allocate a new pagetable.
  lguest: don't share Switcher PTE pages between guests.
  lguest: expost switcher_pages array (as lg_switcher_pages).
  lguest: extract shadow PTE walking / allocating.
  lguest: make check_gpte et. al return bool.
  lguest: assume Switcher text is a single page.
  lguest: rename switcher_page to switcher_pages.
  lguest: remove RESERVE_MEM constant.
  lguest: check vaddr not pgd for Switcher protection.
  lguest: prepare to make SWITCHER_ADDR a variable.
  virtio: console: replace EMFILE with EBUSY for already-open port
  virtio-scsi: reset virtqueue affinity when doing cpu hotplug
  virtio-scsi: introduce multiqueue support
  virtio-scsi: push vq lock/unlock into virtscsi_vq_done
  virtio-scsi: pass struct virtio_scsi to virtqueue completion function
  ...
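For context on the virtio-net conversion in the diff below: the old virtqueue_add_buf(vq, sg, out, in, data, gfp) call took counts of outgoing and incoming entries within a single scatterlist, while the new direction-specific helpers introduced in this series take just one count. A minimal sketch of the new receive-side convention, using an illustrative helper name (my_queue_rx_page) rather than anything in virtio_net.c:

#include <linux/virtio.h>
#include <linux/scatterlist.h>
#include <linux/mm.h>

/* Illustrative helper: queue one page as a device-writable (receive) buffer.
 * The old API took separate out/in entry counts for one sg list:
 *     virtqueue_add_buf(vq, &sg, 0, 1, page, gfp);
 * The new helpers encode the direction in their name, so only the entry
 * count is passed.
 */
static int my_queue_rx_page(struct virtqueue *vq, struct page *page, gfp_t gfp)
{
        struct scatterlist sg;

        sg_init_one(&sg, page_address(page), PAGE_SIZE);
        return virtqueue_add_inbuf(vq, &sg, 1, page, gfp);
}

virtqueue_add_outbuf() is the mirror-image call for transmit buffers, which is why xmit_skb() in the diff collapses to a single-line return.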
Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r--  drivers/net/virtio_net.c  |  77
1 file changed, 41 insertions(+), 36 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 50077753a0e5..3c23fdc27bf0 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -39,7 +39,6 @@ module_param(gso, bool, 0444);
 #define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
 #define GOOD_COPY_LEN  128
 
-#define VIRTNET_SEND_COMMAND_SG_MAX    2
 #define VIRTNET_DRIVER_VERSION "1.0.0"
 
 struct virtnet_stats {
@@ -444,7 +443,7 @@ static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
 
        skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
 
-       err = virtqueue_add_buf(rq->vq, rq->sg, 0, 2, skb, gfp);
+       err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
        if (err < 0)
                dev_kfree_skb(skb);
 
@@ -489,8 +488,8 @@ static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
 
        /* chain first in list head */
        first->private = (unsigned long)list;
-       err = virtqueue_add_buf(rq->vq, rq->sg, 0, MAX_SKB_FRAGS + 2,
-                               first, gfp);
+       err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
+                                 first, gfp);
        if (err < 0)
                give_pages(rq, first);
 
@@ -508,7 +507,7 @@ static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
 
        sg_init_one(rq->sg, page_address(page), PAGE_SIZE);
 
-       err = virtqueue_add_buf(rq->vq, rq->sg, 0, 1, page, gfp);
+       err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, page, gfp);
        if (err < 0)
                give_pages(rq, page);
 
@@ -582,7 +581,7 @@ static void refill_work(struct work_struct *work)
        bool still_empty;
        int i;
 
-       for (i = 0; i < vi->max_queue_pairs; i++) {
+       for (i = 0; i < vi->curr_queue_pairs; i++) {
                struct receive_queue *rq = &vi->rq[i];
 
                napi_disable(&rq->napi);
@@ -637,7 +636,7 @@ static int virtnet_open(struct net_device *dev)
        struct virtnet_info *vi = netdev_priv(dev);
        int i;
 
-       for (i = 0; i < vi->max_queue_pairs; i++) {
+       for (i = 0; i < vi->curr_queue_pairs; i++) {
                /* Make sure we have some buffers: if oom use wq. */
                if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
                        schedule_delayed_work(&vi->refill, 0);
@@ -711,8 +710,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
                sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr);
 
        num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
-       return virtqueue_add_buf(sq->vq, sq->sg, num_sg,
-                                0, skb, GFP_ATOMIC);
+       return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
 }
 
 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -767,32 +765,35 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
  * never fail unless improperly formated.
  */
 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
-                                struct scatterlist *data, int out, int in)
+                                struct scatterlist *out,
+                                struct scatterlist *in)
 {
-       struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
+       struct scatterlist *sgs[4], hdr, stat;
        struct virtio_net_ctrl_hdr ctrl;
        virtio_net_ctrl_ack status = ~0;
-       unsigned int tmp;
-       int i;
+       unsigned out_num = 0, in_num = 0, tmp;
 
        /* Caller should know better */
-       BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
-              (out + in > VIRTNET_SEND_COMMAND_SG_MAX));
-
-       out++; /* Add header */
-       in++; /* Add return status */
+       BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
 
        ctrl.class = class;
        ctrl.cmd = cmd;
+       /* Add header */
+       sg_init_one(&hdr, &ctrl, sizeof(ctrl));
+       sgs[out_num++] = &hdr;
 
-       sg_init_table(sg, out + in);
+       if (out)
+               sgs[out_num++] = out;
+       if (in)
+               sgs[out_num + in_num++] = in;
 
-       sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
-       for_each_sg(data, s, out + in - 2, i)
-               sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
-       sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
+       /* Add return status. */
+       sg_init_one(&stat, &status, sizeof(status));
+       sgs[out_num + in_num++] = &stat;
 
-       BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);
+       BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
+       BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC)
+              < 0);
 
        virtqueue_kick(vi->cvq);
 
@@ -821,7 +822,7 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
        sg_init_one(&sg, addr->sa_data, dev->addr_len);
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
                                  VIRTIO_NET_CTRL_MAC_ADDR_SET,
-                                 &sg, 1, 0)) {
+                                 &sg, NULL)) {
                dev_warn(&vdev->dev,
                         "Failed to set mac address by vq command.\n");
                return -EINVAL;
@@ -889,8 +890,7 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
 {
        rtnl_lock();
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
-                                 VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL,
-                                 0, 0))
+                                 VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL))
                dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
        rtnl_unlock();
 }
@@ -900,6 +900,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
        struct scatterlist sg;
        struct virtio_net_ctrl_mq s;
        struct net_device *dev = vi->dev;
+       int i;
 
        if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
                return 0;
@@ -908,12 +909,16 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
        sg_init_one(&sg, &s, sizeof(s));
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
-                                 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, 1, 0)){
+                                 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) {
                dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
                         queue_pairs);
                return -EINVAL;
-       } else
+       } else {
+               for (i = vi->curr_queue_pairs; i < queue_pairs; i++)
+                       if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+                               schedule_delayed_work(&vi->refill, 0);
                vi->curr_queue_pairs = queue_pairs;
+       }
 
        return 0;
 }
@@ -955,7 +960,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_PROMISC,
-                                 sg, 1, 0))
+                                 sg, NULL))
                dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
                         promisc ? "en" : "dis");
 
@@ -963,7 +968,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_ALLMULTI,
-                                 sg, 1, 0))
+                                 sg, NULL))
                dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
                         allmulti ? "en" : "dis");
 
@@ -1000,7 +1005,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
                                  VIRTIO_NET_CTRL_MAC_TABLE_SET,
-                                 sg, 2, 0))
+                                 sg, NULL))
                dev_warn(&dev->dev, "Failed to set MAC fitler table.\n");
 
        kfree(buf);
@@ -1015,7 +1020,7 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
        sg_init_one(&sg, &vid, sizeof(vid));
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
-                                 VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
+                                 VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL))
                dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
        return 0;
 }
@@ -1029,7 +1034,7 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
        sg_init_one(&sg, &vid, sizeof(vid));
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
-                                 VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
+                                 VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL))
                dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
        return 0;
 }
@@ -1570,7 +1575,7 @@ static int virtnet_probe(struct virtio_device *vdev)
        }
 
        /* Last of all, set up some receive buffers. */
-       for (i = 0; i < vi->max_queue_pairs; i++) {
+       for (i = 0; i < vi->curr_queue_pairs; i++) {
                try_fill_recv(&vi->rq[i], GFP_KERNEL);
 
                /* If we didn't even get one input buffer, we're useless. */
@@ -1694,7 +1699,7 @@ static int virtnet_restore(struct virtio_device *vdev)
 
        netif_device_attach(vi->dev);
 
-       for (i = 0; i < vi->max_queue_pairs; i++)
+       for (i = 0; i < vi->curr_queue_pairs; i++)
                if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
                        schedule_delayed_work(&vi->refill, 0);
 
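As a companion to the virtnet_send_command() hunk above, here is a condensed, self-contained sketch of the new control-virtqueue pattern: device-readable scatterlists are collected first, device-writable ones after them, and the whole table is handed to virtqueue_add_sgs(). The names my_ctrl_hdr and my_send_ctrl are illustrative stand-ins, not the driver's own types; the virtqueue_* calls are the in-tree API this merge introduces.

#include <linux/virtio.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <asm/processor.h>

/* Illustrative stand-in for virtio_net_ctrl_hdr / virtio_net_ctrl_ack. */
struct my_ctrl_hdr {
        u8 class;
        u8 cmd;
};

static bool my_send_ctrl(struct virtqueue *cvq, u8 class, u8 cmd,
                         struct scatterlist *out, struct scatterlist *in)
{
        struct scatterlist hdr, stat, *sgs[4];
        struct my_ctrl_hdr ctrl = { .class = class, .cmd = cmd };
        u8 status = ~0;
        unsigned int out_num = 0, in_num = 0, len;

        /* Device-readable entries first: command header, then optional payload. */
        sg_init_one(&hdr, &ctrl, sizeof(ctrl));
        sgs[out_num++] = &hdr;
        if (out)
                sgs[out_num++] = out;

        /* Device-writable entries after: optional response, then the ack byte. */
        if (in)
                sgs[out_num + in_num++] = in;
        sg_init_one(&stat, &status, sizeof(status));
        sgs[out_num + in_num++] = &stat;

        BUG_ON(virtqueue_add_sgs(cvq, sgs, out_num, in_num, &ctrl, GFP_ATOMIC) < 0);
        virtqueue_kick(cvq);

        /* Spin for completion, as the driver does for its short-lived commands. */
        while (!virtqueue_get_buf(cvq, &len))
                cpu_relax();

        return status == 0;     /* 0 is VIRTIO_NET_OK */
}

This is why the call sites in the diff shrink from (data, out, in) triples to (out, in) scatterlist pairs, with NULL passed for an unused direction.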