aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/hyperv/netvsc.c
diff options
context:
space:
mode:
authorHaiyang Zhang <haiyangz@microsoft.com>2015-04-13 19:34:35 -0400
committerDavid S. Miller <davem@davemloft.net>2015-04-14 14:57:10 -0400
commitaa0a34be68290aa9aa071c0691fb8b6edda38358 (patch)
treebf404acc758981c7ac0fffaf1bb6afb9af26fbe3 /drivers/net/hyperv/netvsc.c
parent6e8a9d9148b6dc2305fcaaf60550b81cbb6319c6 (diff)
hv_netvsc: Implement partial copy into send buffer
If remaining space in a send buffer slot is too small for the whole message, we only copy the RNDIS header and PPI data into the send buffer, so we can batch one more packet each time. This reduces the vmbus per-message overhead.

Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Reviewed-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/hyperv/netvsc.c')
-rw-r--r--drivers/net/hyperv/netvsc.c50
1 files changed, 34 insertions, 16 deletions
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 4d4d497d5762..2e8ad0636b46 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -703,15 +703,18 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
703 u32 msg_size = 0; 703 u32 msg_size = 0;
704 u32 padding = 0; 704 u32 padding = 0;
705 u32 remain = packet->total_data_buflen % net_device->pkt_align; 705 u32 remain = packet->total_data_buflen % net_device->pkt_align;
706 u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
707 packet->page_buf_cnt;
706 708
707 /* Add padding */ 709 /* Add padding */
708 if (packet->is_data_pkt && packet->xmit_more && remain) { 710 if (packet->is_data_pkt && packet->xmit_more && remain &&
711 !packet->cp_partial) {
709 padding = net_device->pkt_align - remain; 712 padding = net_device->pkt_align - remain;
710 packet->rndis_msg->msg_len += padding; 713 packet->rndis_msg->msg_len += padding;
711 packet->total_data_buflen += padding; 714 packet->total_data_buflen += padding;
712 } 715 }
713 716
714 for (i = 0; i < packet->page_buf_cnt; i++) { 717 for (i = 0; i < page_count; i++) {
715 char *src = phys_to_virt(packet->page_buf[i].pfn << PAGE_SHIFT); 718 char *src = phys_to_virt(packet->page_buf[i].pfn << PAGE_SHIFT);
716 u32 offset = packet->page_buf[i].offset; 719 u32 offset = packet->page_buf[i].offset;
717 u32 len = packet->page_buf[i].len; 720 u32 len = packet->page_buf[i].len;
@@ -739,6 +742,7 @@ static inline int netvsc_send_pkt(
739 struct net_device *ndev = net_device->ndev; 742 struct net_device *ndev = net_device->ndev;
740 u64 req_id; 743 u64 req_id;
741 int ret; 744 int ret;
745 struct hv_page_buffer *pgbuf;
742 746
743 nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT; 747 nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
744 if (packet->is_data_pkt) { 748 if (packet->is_data_pkt) {
@@ -766,8 +770,10 @@ static inline int netvsc_send_pkt(
766 return -ENODEV; 770 return -ENODEV;
767 771
768 if (packet->page_buf_cnt) { 772 if (packet->page_buf_cnt) {
773 pgbuf = packet->cp_partial ? packet->page_buf +
774 packet->rmsg_pgcnt : packet->page_buf;
769 ret = vmbus_sendpacket_pagebuffer(out_channel, 775 ret = vmbus_sendpacket_pagebuffer(out_channel,
770 packet->page_buf, 776 pgbuf,
771 packet->page_buf_cnt, 777 packet->page_buf_cnt,
772 &nvmsg, 778 &nvmsg,
773 sizeof(struct nvsp_message), 779 sizeof(struct nvsp_message),
@@ -824,6 +830,7 @@ int netvsc_send(struct hv_device *device,
824 unsigned long flag; 830 unsigned long flag;
825 struct multi_send_data *msdp; 831 struct multi_send_data *msdp;
826 struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL; 832 struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
833 bool try_batch;
827 834
828 net_device = get_outbound_net_device(device); 835 net_device = get_outbound_net_device(device);
829 if (!net_device) 836 if (!net_device)
@@ -837,6 +844,7 @@ int netvsc_send(struct hv_device *device,
837 } 844 }
838 packet->channel = out_channel; 845 packet->channel = out_channel;
839 packet->send_buf_index = NETVSC_INVALID_INDEX; 846 packet->send_buf_index = NETVSC_INVALID_INDEX;
847 packet->cp_partial = false;
840 848
841 msdp = &net_device->msd[q_idx]; 849 msdp = &net_device->msd[q_idx];
842 850
@@ -845,12 +853,18 @@ int netvsc_send(struct hv_device *device,
845 if (msdp->pkt) 853 if (msdp->pkt)
846 msd_len = msdp->pkt->total_data_buflen; 854 msd_len = msdp->pkt->total_data_buflen;
847 855
848 if (packet->is_data_pkt && msd_len > 0 && 856 try_batch = packet->is_data_pkt && msd_len > 0 && msdp->count <
849 msdp->count < net_device->max_pkt && 857 net_device->max_pkt;
850 msd_len + pktlen + net_device->pkt_align < 858
859 if (try_batch && msd_len + pktlen + net_device->pkt_align <
851 net_device->send_section_size) { 860 net_device->send_section_size) {
852 section_index = msdp->pkt->send_buf_index; 861 section_index = msdp->pkt->send_buf_index;
853 862
863 } else if (try_batch && msd_len + packet->rmsg_size <
864 net_device->send_section_size) {
865 section_index = msdp->pkt->send_buf_index;
866 packet->cp_partial = true;
867
854 } else if (packet->is_data_pkt && pktlen + net_device->pkt_align < 868 } else if (packet->is_data_pkt && pktlen + net_device->pkt_align <
855 net_device->send_section_size) { 869 net_device->send_section_size) {
856 section_index = netvsc_get_next_send_section(net_device); 870 section_index = netvsc_get_next_send_section(net_device);
@@ -866,22 +880,26 @@ int netvsc_send(struct hv_device *device,
866 netvsc_copy_to_send_buf(net_device, 880 netvsc_copy_to_send_buf(net_device,
867 section_index, msd_len, 881 section_index, msd_len,
868 packet); 882 packet);
869 if (!packet->part_of_skb) {
870 skb = (struct sk_buff *)
871 (unsigned long)
872 packet->send_completion_tid;
873
874 packet->send_completion_tid = 0;
875 }
876 883
877 packet->page_buf_cnt = 0;
878 packet->send_buf_index = section_index; 884 packet->send_buf_index = section_index;
879 packet->total_data_buflen += msd_len; 885
886 if (packet->cp_partial) {
887 packet->page_buf_cnt -= packet->rmsg_pgcnt;
888 packet->total_data_buflen = msd_len + packet->rmsg_size;
889 } else {
890 packet->page_buf_cnt = 0;
891 packet->total_data_buflen += msd_len;
892 if (!packet->part_of_skb) {
893 skb = (struct sk_buff *)(unsigned long)packet->
894 send_completion_tid;
895 packet->send_completion_tid = 0;
896 }
897 }
880 898
881 if (msdp->pkt) 899 if (msdp->pkt)
882 netvsc_xmit_completion(msdp->pkt); 900 netvsc_xmit_completion(msdp->pkt);
883 901
884 if (packet->xmit_more) { 902 if (packet->xmit_more && !packet->cp_partial) {
885 msdp->pkt = packet; 903 msdp->pkt = packet;
886 msdp->count++; 904 msdp->count++;
887 } else { 905 } else {