about summary refs log tree commit diff stats
path: root/drivers/net/hyperv/netvsc.c
diff options
context:
space:
mode:
author: Haiyang Zhang <haiyangz@microsoft.com> 2015-03-26 12:03:37 -0400
committer: David S. Miller <davem@davemloft.net> 2015-03-29 15:49:55 -0400
commit: 7c3877f275ee6b479fa828947811c76d431501ca (patch)
tree: c4eb557ff31ea579d0a52aa5e4623b8fabf4a688 /drivers/net/hyperv/netvsc.c
parent: 4ef295e04722c955cd60723d78ec525a2e80de27 (diff)
hv_netvsc: Implement batching in send buffer
With this patch, we can send out multiple RNDIS data packets in one send buffer slot and one VMBus message. It reduces the overhead associated with VMBus messages. Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com> Reviewed-by: K. Y. Srinivasan <kys@microsoft.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/hyperv/netvsc.c')
-rw-r--r-- drivers/net/hyperv/netvsc.c | 187
1 file changed, 141 insertions, 46 deletions
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 208eb05446ba..b81bd37d3afb 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -37,6 +37,7 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
37{ 37{
38 struct netvsc_device *net_device; 38 struct netvsc_device *net_device;
39 struct net_device *ndev = hv_get_drvdata(device); 39 struct net_device *ndev = hv_get_drvdata(device);
40 int i;
40 41
41 net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL); 42 net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
42 if (!net_device) 43 if (!net_device)
@@ -53,6 +54,11 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
53 net_device->destroy = false; 54 net_device->destroy = false;
54 net_device->dev = device; 55 net_device->dev = device;
55 net_device->ndev = ndev; 56 net_device->ndev = ndev;
57 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
58 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
59
60 for (i = 0; i < num_online_cpus(); i++)
61 spin_lock_init(&net_device->msd[i].lock);
56 62
57 hv_set_drvdata(device, net_device); 63 hv_set_drvdata(device, net_device);
58 return net_device; 64 return net_device;
@@ -687,12 +693,23 @@ static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
687 693
688static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, 694static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
689 unsigned int section_index, 695 unsigned int section_index,
696 u32 pend_size,
690 struct hv_netvsc_packet *packet) 697 struct hv_netvsc_packet *packet)
691{ 698{
692 char *start = net_device->send_buf; 699 char *start = net_device->send_buf;
693 char *dest = (start + (section_index * net_device->send_section_size)); 700 char *dest = start + (section_index * net_device->send_section_size)
701 + pend_size;
694 int i; 702 int i;
695 u32 msg_size = 0; 703 u32 msg_size = 0;
704 u32 padding = 0;
705 u32 remain = packet->total_data_buflen % net_device->pkt_align;
706
707 /* Add padding */
708 if (packet->is_data_pkt && packet->xmit_more && remain) {
709 padding = net_device->pkt_align - remain;
710 packet->rndis_msg->msg_len += padding;
711 packet->total_data_buflen += padding;
712 }
696 713
697 for (i = 0; i < packet->page_buf_cnt; i++) { 714 for (i = 0; i < packet->page_buf_cnt; i++) {
698 char *src = phys_to_virt(packet->page_buf[i].pfn << PAGE_SHIFT); 715 char *src = phys_to_virt(packet->page_buf[i].pfn << PAGE_SHIFT);
@@ -703,67 +720,48 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
703 msg_size += len; 720 msg_size += len;
704 dest += len; 721 dest += len;
705 } 722 }
723
724 if (padding) {
725 memset(dest, 0, padding);
726 msg_size += padding;
727 }
728
706 return msg_size; 729 return msg_size;
707} 730}
708 731
709int netvsc_send(struct hv_device *device, 732static inline int netvsc_send_pkt(
710 struct hv_netvsc_packet *packet) 733 struct hv_netvsc_packet *packet,
734 struct netvsc_device *net_device)
711{ 735{
712 struct netvsc_device *net_device; 736 struct nvsp_message nvmsg;
713 int ret = 0; 737 struct vmbus_channel *out_channel = packet->channel;
714 struct nvsp_message sendMessage;
715 struct net_device *ndev;
716 struct vmbus_channel *out_channel = NULL;
717 u64 req_id;
718 unsigned int section_index = NETVSC_INVALID_INDEX;
719 u32 msg_size = 0;
720 struct sk_buff *skb = NULL;
721 u16 q_idx = packet->q_idx; 738 u16 q_idx = packet->q_idx;
739 struct net_device *ndev = net_device->ndev;
740 u64 req_id;
741 int ret;
722 742
723 743 nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
724 net_device = get_outbound_net_device(device);
725 if (!net_device)
726 return -ENODEV;
727 ndev = net_device->ndev;
728
729 sendMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
730 if (packet->is_data_pkt) { 744 if (packet->is_data_pkt) {
731 /* 0 is RMC_DATA; */ 745 /* 0 is RMC_DATA; */
732 sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 0; 746 nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 0;
733 } else { 747 } else {
734 /* 1 is RMC_CONTROL; */ 748 /* 1 is RMC_CONTROL; */
735 sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1; 749 nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 1;
736 } 750 }
737 751
738 /* Attempt to send via sendbuf */ 752 nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
739 if (packet->total_data_buflen < net_device->send_section_size) { 753 packet->send_buf_index;
740 section_index = netvsc_get_next_send_section(net_device); 754 if (packet->send_buf_index == NETVSC_INVALID_INDEX)
741 if (section_index != NETVSC_INVALID_INDEX) { 755 nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
742 msg_size = netvsc_copy_to_send_buf(net_device, 756 else
743 section_index, 757 nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size =
744 packet); 758 packet->total_data_buflen;
745 skb = (struct sk_buff *)
746 (unsigned long)packet->send_completion_tid;
747 packet->page_buf_cnt = 0;
748 }
749 }
750 packet->send_buf_index = section_index;
751
752
753 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
754 section_index;
755 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = msg_size;
756 759
757 if (packet->send_completion) 760 if (packet->send_completion)
758 req_id = (ulong)packet; 761 req_id = (ulong)packet;
759 else 762 else
760 req_id = 0; 763 req_id = 0;
761 764
762 out_channel = net_device->chn_table[packet->q_idx];
763 if (out_channel == NULL)
764 out_channel = device->channel;
765 packet->channel = out_channel;
766
767 if (out_channel->rescind) 765 if (out_channel->rescind)
768 return -ENODEV; 766 return -ENODEV;
769 767
@@ -771,11 +769,12 @@ int netvsc_send(struct hv_device *device,
771 ret = vmbus_sendpacket_pagebuffer(out_channel, 769 ret = vmbus_sendpacket_pagebuffer(out_channel,
772 packet->page_buf, 770 packet->page_buf,
773 packet->page_buf_cnt, 771 packet->page_buf_cnt,
774 &sendMessage, 772 &nvmsg,
775 sizeof(struct nvsp_message), 773 sizeof(struct nvsp_message),
776 req_id); 774 req_id);
777 } else { 775 } else {
778 ret = vmbus_sendpacket(out_channel, &sendMessage, 776 ret = vmbus_sendpacket(
777 out_channel, &nvmsg,
779 sizeof(struct nvsp_message), 778 sizeof(struct nvsp_message),
780 req_id, 779 req_id,
781 VM_PKT_DATA_INBAND, 780 VM_PKT_DATA_INBAND,
@@ -809,6 +808,102 @@ int netvsc_send(struct hv_device *device,
809 packet, ret); 808 packet, ret);
810 } 809 }
811 810
811 return ret;
812}
813
814int netvsc_send(struct hv_device *device,
815 struct hv_netvsc_packet *packet)
816{
817 struct netvsc_device *net_device;
818 int ret = 0, m_ret = 0;
819 struct vmbus_channel *out_channel;
820 u16 q_idx = packet->q_idx;
821 u32 pktlen = packet->total_data_buflen, msd_len = 0;
822 unsigned int section_index = NETVSC_INVALID_INDEX;
823 struct sk_buff *skb = NULL;
824 unsigned long flag;
825 struct multi_send_data *msdp;
826 struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
827
828 net_device = get_outbound_net_device(device);
829 if (!net_device)
830 return -ENODEV;
831
832 out_channel = net_device->chn_table[q_idx];
833 if (!out_channel) {
834 out_channel = device->channel;
835 q_idx = 0;
836 packet->q_idx = 0;
837 }
838 packet->channel = out_channel;
839 packet->send_buf_index = NETVSC_INVALID_INDEX;
840
841 msdp = &net_device->msd[q_idx];
842
843 /* batch packets in send buffer if possible */
844 spin_lock_irqsave(&msdp->lock, flag);
845 if (msdp->pkt)
846 msd_len = msdp->pkt->total_data_buflen;
847
848 if (packet->is_data_pkt && msd_len > 0 &&
849 msdp->count < net_device->max_pkt &&
850 msd_len + pktlen + net_device->pkt_align <
851 net_device->send_section_size) {
852 section_index = msdp->pkt->send_buf_index;
853
854 } else if (packet->is_data_pkt && pktlen + net_device->pkt_align <
855 net_device->send_section_size) {
856 section_index = netvsc_get_next_send_section(net_device);
857 if (section_index != NETVSC_INVALID_INDEX) {
858 msd_send = msdp->pkt;
859 msdp->pkt = NULL;
860 msdp->count = 0;
861 msd_len = 0;
862 }
863 }
864
865 if (section_index != NETVSC_INVALID_INDEX) {
866 netvsc_copy_to_send_buf(net_device,
867 section_index, msd_len,
868 packet);
869 skb = (struct sk_buff *)
870 (unsigned long)packet->send_completion_tid;
871
872 packet->page_buf_cnt = 0;
873 packet->send_buf_index = section_index;
874 packet->total_data_buflen += msd_len;
875
876 kfree(msdp->pkt);
877 if (packet->xmit_more) {
878 msdp->pkt = packet;
879 msdp->count++;
880 } else {
881 cur_send = packet;
882 msdp->pkt = NULL;
883 msdp->count = 0;
884 }
885 } else {
886 msd_send = msdp->pkt;
887 msdp->pkt = NULL;
888 msdp->count = 0;
889 cur_send = packet;
890 }
891
892 spin_unlock_irqrestore(&msdp->lock, flag);
893
894 if (msd_send) {
895 m_ret = netvsc_send_pkt(msd_send, net_device);
896
897 if (m_ret != 0) {
898 netvsc_free_send_slot(net_device,
899 msd_send->send_buf_index);
900 kfree(msd_send);
901 }
902 }
903
904 if (cur_send)
905 ret = netvsc_send_pkt(cur_send, net_device);
906
812 if (ret != 0) { 907 if (ret != 0) {
813 if (section_index != NETVSC_INVALID_INDEX) 908 if (section_index != NETVSC_INVALID_INDEX)
814 netvsc_free_send_slot(net_device, section_index); 909 netvsc_free_send_slot(net_device, section_index);